author     clang-format-7.0.1 <adam.martin@10gen.com>    2019-07-26 18:42:24 -0400
committer  ADAM David Alan Martin <adam.martin@10gen.com>    2019-07-26 18:42:24 -0400
commit     c1a45ebbb0530e3d0201321d725527f1eb83ffce (patch)
tree       f523079dc5ded3052eefbdcaae424b7502df5b25 /jstests/noPassthrough
parent     c9599d8610c3da0b7c3da65667aff821063cf5b9 (diff)
download   mongo-c1a45ebbb0530e3d0201321d725527f1eb83ffce.tar.gz
Apply formatting per `clang-format-7.0.1`
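The change is almost entirely mechanical: clang-format 7 stops indenting the body of the `(function() { ... }())` wrapper that these shell tests use, so most hunks below simply shift statements to column zero and re-wrap a few object literals and call arguments. A minimal before/after sketch of the wrapper change (a synthetic illustration, not a file taken from this diff):

    // Old layout (pre clang-format-7.0.1): the IIFE body is indented one level.
    (function() {
        "use strict";
        const conn = MongoRunner.runMongod({});  // illustrative only
        MongoRunner.stopMongod(conn);
    }());

    // New layout (clang-format-7.0.1): the IIFE body starts at column zero.
    (function() {
    "use strict";
    const conn = MongoRunner.runMongod({});  // illustrative only
    MongoRunner.stopMongod(conn);
    }());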
Diffstat (limited to 'jstests/noPassthrough')
-rw-r--r--  jstests/noPassthrough/abandon_snapshot_for_each_collection_from_db.js  53
-rw-r--r--  jstests/noPassthrough/absent_ns_field_in_index_specs.js  93
-rw-r--r--  jstests/noPassthrough/afterClusterTime_committed_reads.js  113
-rw-r--r--  jstests/noPassthrough/after_cluster_time.js  110
-rw-r--r--  jstests/noPassthrough/agg_explain_read_concern.js  108
-rw-r--r--  jstests/noPassthrough/aggregation_cursor_invalidations.js  592
-rw-r--r--  jstests/noPassthrough/aggregation_log_namespace.js  97
-rw-r--r--  jstests/noPassthrough/aggregation_zero_batchsize.js  134
-rw-r--r--  jstests/noPassthrough/apply_ops_DDL_operation_does_not_take_global_X.js  131
-rw-r--r--  jstests/noPassthrough/apply_ops_mode.js  166
-rw-r--r--  jstests/noPassthrough/apply_ops_overwrite_admin_system_version.js  70
-rw-r--r--  jstests/noPassthrough/atomic_rename_collection.js  82
-rw-r--r--  jstests/noPassthrough/auth_reject_mismatching_logical_times.js  109
-rw-r--r--  jstests/noPassthrough/auto_retry_on_network_error.js  213
-rw-r--r--  jstests/noPassthrough/backup_restore_fsync_lock.js  8
-rw-r--r--  jstests/noPassthrough/backup_restore_rolling.js  40
-rw-r--r--  jstests/noPassthrough/backup_restore_stop_start.js  4
-rw-r--r--  jstests/noPassthrough/bind_all_ipv6.js  10
-rw-r--r--  jstests/noPassthrough/bind_ip_all.js  32
-rw-r--r--  jstests/noPassthrough/bind_localhost.js  20
-rw-r--r--  jstests/noPassthrough/block_compressor_options.js  60
-rw-r--r--  jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js  76
-rw-r--r--  jstests/noPassthrough/change_stream_failover.js  161
-rw-r--r--  jstests/noPassthrough/change_stream_resume_before_add_shard.js  212
-rw-r--r--  jstests/noPassthrough/change_stream_sharded_startafter_invalidate.js  56
-rw-r--r--  jstests/noPassthrough/change_stream_transaction.js  511
-rw-r--r--  jstests/noPassthrough/change_streams_collation_chunk_migration.js  95
-rw-r--r--  jstests/noPassthrough/change_streams_require_majority_read_concern.js  157
-rw-r--r--  jstests/noPassthrough/change_streams_required_privileges.js  602
-rw-r--r--  jstests/noPassthrough/change_streams_resume_at_same_clustertime.js  95
-rw-r--r--  jstests/noPassthrough/change_streams_resume_same_clustertime_different_uuid.js  148
-rw-r--r--  jstests/noPassthrough/change_streams_resume_token_applyops_overlap.js  150
-rw-r--r--  jstests/noPassthrough/change_streams_shell_helper_resume_token.js  150
-rw-r--r--  jstests/noPassthrough/change_streams_update_lookup_collation.js  189
-rw-r--r--  jstests/noPassthrough/characterize_index_builds_on_restart.js  412
-rw-r--r--  jstests/noPassthrough/child_op_numyields.js  188
-rw-r--r--  jstests/noPassthrough/client_metadata_log.js  90
-rw-r--r--  jstests/noPassthrough/client_metadata_slowlog.js  46
-rw-r--r--  jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js  64
-rw-r--r--  jstests/noPassthrough/coll_mod_apply_ops.js  68
-rw-r--r--  jstests/noPassthrough/collation_clone_collection.js  120
-rw-r--r--  jstests/noPassthrough/commands_handle_kill.js  373
-rw-r--r--  jstests/noPassthrough/commands_preserve_exec_error_code.js  73
-rw-r--r--  jstests/noPassthrough/commit_quorum.js  176
-rw-r--r--  jstests/noPassthrough/compression_options.js  77
-rw-r--r--  jstests/noPassthrough/configExpand_exec_digest.js  109
-rw-r--r--  jstests/noPassthrough/configExpand_exec_noexpand.js  36
-rw-r--r--  jstests/noPassthrough/configExpand_exec_permissions.js  40
-rw-r--r--  jstests/noPassthrough/configExpand_exec_timeeout.js  46
-rw-r--r--  jstests/noPassthrough/configExpand_exec_values.js  43
-rw-r--r--  jstests/noPassthrough/configExpand_exec_wholeconfig.js  16
-rw-r--r--  jstests/noPassthrough/configExpand_rest_noexpand.js  48
-rw-r--r--  jstests/noPassthrough/configExpand_rest_permissions.js  44
-rw-r--r--  jstests/noPassthrough/configExpand_rest_timeout.js  52
-rw-r--r--  jstests/noPassthrough/configExpand_rest_values.js  59
-rw-r--r--  jstests/noPassthrough/configExpand_rest_wholeconfig.js  26
-rw-r--r--  jstests/noPassthrough/count_helper_read_preference.js  73
-rw-r--r--  jstests/noPassthrough/create_view_does_not_take_database_X.js  32
-rw-r--r--  jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js  106
-rw-r--r--  jstests/noPassthrough/crud_timestamps.js  207
-rw-r--r--  jstests/noPassthrough/currentop_active_cursor.js  200
-rw-r--r--  jstests/noPassthrough/currentop_active_transaction.js  355
-rw-r--r--  jstests/noPassthrough/currentop_inactive_transaction_includes_last_client_info.js  100
-rw-r--r--  jstests/noPassthrough/currentop_includes_await_time.js  92
-rw-r--r--  jstests/noPassthrough/currentop_query.js  1112
-rw-r--r--  jstests/noPassthrough/currentop_transaction_metrics.js  102
-rw-r--r--  jstests/noPassthrough/cycle_detection_test.js  170
-rw-r--r--  jstests/noPassthrough/data_consistency_checks.js  355
-rw-r--r--  jstests/noPassthrough/dbhash_capped_collection.js  96
-rw-r--r--  jstests/noPassthrough/devnull.js  14
-rw-r--r--  jstests/noPassthrough/directoryperdb.js  52
-rw-r--r--  jstests/noPassthrough/disable_majority_reads_restart.js  118
-rw-r--r--  jstests/noPassthrough/disabled_test_parameters.js  60
-rw-r--r--  jstests/noPassthrough/do_not_drop_coll_after_succesful_out.js  36
-rw-r--r--  jstests/noPassthrough/do_not_rebuild_indexes_before_repair.js  91
-rw-r--r--  jstests/noPassthrough/document_count_functions.js  78
-rw-r--r--  jstests/noPassthrough/drop_connections_replSet.js  94
-rw-r--r--  jstests/noPassthrough/drop_connections_sharded.js  90
-rw-r--r--  jstests/noPassthrough/drop_view_does_not_take_database_X.js  36
-rw-r--r--  jstests/noPassthrough/dropcollection_duplicate_fields.js  33
-rw-r--r--  jstests/noPassthrough/dropdatabase_respect_maxtimems.js  92
-rw-r--r--  jstests/noPassthrough/durable_view_catalog.js  145
-rw-r--r--  jstests/noPassthrough/end_sessions_command.js  173
-rw-r--r--  jstests/noPassthrough/exchange_in_session.js  126
-rw-r--r--  jstests/noPassthrough/exhaust_option_disallowed_in_session.js  38
-rw-r--r--  jstests/noPassthrough/exit_logging.js  185
-rw-r--r--  jstests/noPassthrough/failcommand_failpoint_not_parallel.js  37
-rw-r--r--  jstests/noPassthrough/feature_compatibility_version.js  78
-rw-r--r--  jstests/noPassthrough/filemd5_kill_during_yield.js  86
-rw-r--r--  jstests/noPassthrough/find_by_uuid_and_rename.js  107
-rw-r--r--  jstests/noPassthrough/flow_control_logging.js  78
-rw-r--r--  jstests/noPassthrough/flow_control_replica_set.js  86
-rw-r--r--  jstests/noPassthrough/ftdc_connection_pool.js  42
-rw-r--r--  jstests/noPassthrough/ftdc_setdirectory.js  218
-rw-r--r--  jstests/noPassthrough/ftdc_setparam.js  24
-rw-r--r--  jstests/noPassthrough/geo_full.js  966
-rw-r--r--  jstests/noPassthrough/geo_mnypts_plus_fields.js  156
-rw-r--r--  jstests/noPassthrough/geo_near_random1.js  26
-rw-r--r--  jstests/noPassthrough/geo_near_random2.js  43
-rw-r--r--  jstests/noPassthrough/global_operation_latency_histogram.js  325
-rw-r--r--  jstests/noPassthrough/global_transaction_latency_histogram.js  221
-rw-r--r--  jstests/noPassthrough/hostname_bind_ips.js  28
-rw-r--r--  jstests/noPassthrough/http_client_keep_alive.js  108
-rw-r--r--  jstests/noPassthrough/hybrid_geo_index_remove_invalid_doc.js  98
-rw-r--r--  jstests/noPassthrough/hybrid_geo_index_update_invalid_doc.js  92
-rw-r--r--  jstests/noPassthrough/hybrid_index_with_updates.js  202
-rw-r--r--  jstests/noPassthrough/hybrid_partial_geo_index.js  100
-rw-r--r--  jstests/noPassthrough/hybrid_partial_index_update.js  78
-rw-r--r--  jstests/noPassthrough/hybrid_sparse_compound_geo_index.js  66
-rw-r--r--  jstests/noPassthrough/hybrid_unique_index_with_updates.js  303
-rw-r--r--  jstests/noPassthrough/hyphenated_database_name.js  28
-rw-r--r--  jstests/noPassthrough/ignore_notablescan.js  106
-rw-r--r--  jstests/noPassthrough/implicit_sessions.js  425
-rw-r--r--  jstests/noPassthrough/index_builds_ignore_prepare_conflicts.js  197
-rw-r--r--  jstests/noPassthrough/index_killop_standalone.js  64
-rw-r--r--  jstests/noPassthrough/index_partial_no_explain_cmds.js  106
-rw-r--r--  jstests/noPassthrough/index_version_autoupgrade.js  251
-rw-r--r--  jstests/noPassthrough/index_version_v2.js  227
-rw-r--r--  jstests/noPassthrough/indexbg1.js  238
-rw-r--r--  jstests/noPassthrough/indexbg2.js  260
-rw-r--r--  jstests/noPassthrough/indexbg_drop.js  156
-rw-r--r--  jstests/noPassthrough/indexbg_killop_apply_ops.js  105
-rw-r--r--  jstests/noPassthrough/indexbg_killop_primary.js  83
-rw-r--r--  jstests/noPassthrough/indexbg_killop_secondary.js  84
-rw-r--r--  jstests/noPassthrough/indexbg_shutdown.js  182
-rw-r--r--  jstests/noPassthrough/initial_sync_wt_cache_full.js  107
-rw-r--r--  jstests/noPassthrough/inmem_config_str.js  22
-rw-r--r--  jstests/noPassthrough/inmem_full.js  140
-rw-r--r--  jstests/noPassthrough/internal_validate_features_as_master.js  44
-rw-r--r--  jstests/noPassthrough/jsHeapLimit.js  38
-rw-r--r--  jstests/noPassthrough/js_exceptions.js  208
-rw-r--r--  jstests/noPassthrough/js_protection.js  143
-rw-r--r--  jstests/noPassthrough/js_protection_roundtrip.js  78
-rw-r--r--  jstests/noPassthrough/json_schema_ignore_unknown_keywords.js  105
-rw-r--r--  jstests/noPassthrough/kill_pinned_cursor.js  179
-rw-r--r--  jstests/noPassthrough/kill_sessions.js  14
-rw-r--r--  jstests/noPassthrough/killop.js  135
-rw-r--r--  jstests/noPassthrough/latency_includes_lock_acquisition_time.js  259
-rw-r--r--  jstests/noPassthrough/launcher_test.js  43
-rw-r--r--  jstests/noPassthrough/libs/backup_restore.js  4
-rw-r--r--  jstests/noPassthrough/libs/configExpand/lib.js  4
-rw-r--r--  jstests/noPassthrough/libs/index_build.js  22
-rw-r--r--  jstests/noPassthrough/list_databases_and_rename_collection.js  100
-rw-r--r--  jstests/noPassthrough/list_indexes_ready_and_in_progress.js  50
-rw-r--r--  jstests/noPassthrough/list_indexes_with_build_uuids.js  120
-rw-r--r--  jstests/noPassthrough/lock_file.js  38
-rw-r--r--  jstests/noPassthrough/lock_file_fail_to_open.js  34
-rw-r--r--  jstests/noPassthrough/lock_stats.js  106
-rw-r--r--  jstests/noPassthrough/lock_stats_suboperation_curop.js  102
-rw-r--r--  jstests/noPassthrough/lock_stats_suboperation_logs.js  134
-rw-r--r--  jstests/noPassthrough/log_and_profile_query_hash.js  288
-rw-r--r--  jstests/noPassthrough/log_find_getmore.js  244
-rw-r--r--  jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js  873
-rw-r--r--  jstests/noPassthrough/logical_session_cache_find_getmore.js  40
-rw-r--r--  jstests/noPassthrough/logical_session_cursor_checks.js  170
-rw-r--r--  jstests/noPassthrough/loglong.js  70
-rw-r--r--  jstests/noPassthrough/lookup_max_intermediate_size.js  167
-rw-r--r--  jstests/noPassthrough/low_js_heap_limit.js  20
-rw-r--r--  jstests/noPassthrough/match_expression_optimization_failpoint.js  54
-rw-r--r--  jstests/noPassthrough/maxTransactionLockRequestTimeoutMillis_serverParameter.js  24
-rw-r--r--  jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js  99
-rw-r--r--  jstests/noPassthrough/max_bson_depth_parameter.js  54
-rw-r--r--  jstests/noPassthrough/max_conns_override.js  76
-rw-r--r--  jstests/noPassthrough/max_time_ms_repl_targeting.js  98
-rw-r--r--  jstests/noPassthrough/member_id_too_large.js  48
-rw-r--r--  jstests/noPassthrough/merge_max_time_ms.js  483
-rw-r--r--  jstests/noPassthrough/minvalid2.js  4
-rw-r--r--  jstests/noPassthrough/mongoebench_test.js  92
-rw-r--r--  jstests/noPassthrough/mongos_exhausts_stale_config_retries.js  113
-rw-r--r--  jstests/noPassthrough/nested_tojson.js  49
-rw-r--r--  jstests/noPassthrough/non_atomic_apply_ops_logging.js  136
-rw-r--r--  jstests/noPassthrough/noncapped_oplog_creation.js  48
-rw-r--r--  jstests/noPassthrough/ns1.js  78
-rw-r--r--  jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js  230
-rw-r--r--  jstests/noPassthrough/out_majority_read_replset.js  76
-rw-r--r--  jstests/noPassthrough/out_max_time_ms.js  223
-rw-r--r--  jstests/noPassthrough/out_merge_majority_read.js  344
-rw-r--r--  jstests/noPassthrough/parse_zone_info.js  28
-rw-r--r--  jstests/noPassthrough/partial_unique_indexes.js  82
-rw-r--r--  jstests/noPassthrough/pipeline_optimization_failpoint.js  70
-rw-r--r--  jstests/noPassthrough/plan_cache_index_create.js  285
-rw-r--r--  jstests/noPassthrough/plan_cache_list_plans_new_format.js  90
-rw-r--r--  jstests/noPassthrough/plan_cache_stats_agg_source.js  334
-rw-r--r--  jstests/noPassthrough/port_options.js  96
-rw-r--r--  jstests/noPassthrough/predictive_connpool.js  254
-rw-r--r--  jstests/noPassthrough/profile_agg_multiple_batches.js  40
-rw-r--r--  jstests/noPassthrough/profile_interrupted_op.js  111
-rw-r--r--  jstests/noPassthrough/query_knobs_validation.js  313
-rw-r--r--  jstests/noPassthrough/query_yield1.js  148
-rw-r--r--  jstests/noPassthrough/query_yield2.js  260
-rw-r--r--  jstests/noPassthrough/query_yield_reset_timer.js  80
-rw-r--r--  jstests/noPassthrough/queryable_backup_mode_incompatible_options.js  76
-rw-r--r--  jstests/noPassthrough/readConcern_atClusterTime.js  265
-rw-r--r--  jstests/noPassthrough/readConcern_atClusterTime_noop_write.js  195
-rw-r--r--  jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js  146
-rw-r--r--  jstests/noPassthrough/readConcern_snapshot.js  248
-rw-r--r--  jstests/noPassthrough/readConcern_snapshot_mongos.js  253
-rw-r--r--  jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js  83
-rw-r--r--  jstests/noPassthrough/read_concern_helper.js  42
-rw-r--r--  jstests/noPassthrough/read_concern_snapshot_aggregation.js  385
-rw-r--r--  jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js  181
-rw-r--r--  jstests/noPassthrough/read_concern_snapshot_yielding.js  613
-rw-r--r--  jstests/noPassthrough/read_majority.js  392
-rw-r--r--  jstests/noPassthrough/read_majority_reads.js  443
-rw-r--r--  jstests/noPassthrough/rebuild_multiple_indexes_at_startup.js  77
-rw-r--r--  jstests/noPassthrough/recovery_wt_cache_full.js  149
-rw-r--r--  jstests/noPassthrough/refresh_logical_session_cache_now.js  69
-rw-r--r--  jstests/noPassthrough/refresh_sessions_command.js  185
-rw-r--r--  jstests/noPassthrough/reindex_crash_rebuilds_id_index.js  83
-rw-r--r--  jstests/noPassthrough/repair_flag_transport_layer.js  24
-rw-r--r--  jstests/noPassthrough/repl_set_resize_oplog.js  58
-rw-r--r--  jstests/noPassthrough/repl_write_threads_start_param.js  55
-rw-r--r--  jstests/noPassthrough/replica_set_connection_error_codes.js  145
-rw-r--r--  jstests/noPassthrough/replica_set_connection_getmore.js  80
-rw-r--r--  jstests/noPassthrough/replica_set_connection_stepdown.js  101
-rw-r--r--  jstests/noPassthrough/report_post_batch_resume_token_mongod.js  212
-rw-r--r--  jstests/noPassthrough/restart_catalog_preserves_min_visible.js  51
-rw-r--r--  jstests/noPassthrough/restart_catalog_sharded_cluster.js  414
-rw-r--r--  jstests/noPassthrough/restart_node_with_bridge.js  85
-rw-r--r--  jstests/noPassthrough/retry_network_error_test.js  67
-rw-r--r--  jstests/noPassthrough/retryable_writes_standalone_api.js  30
-rw-r--r--  jstests/noPassthrough/rollback_wt_cache_full.js  148
-rw-r--r--  jstests/noPassthrough/rollback_wt_drop.js  286
-rw-r--r--  jstests/noPassthrough/router_transactions_metrics.js  1081
-rw-r--r--  jstests/noPassthrough/server_read_concern_metrics.js  668
-rw-r--r--  jstests/noPassthrough/server_transaction_metrics.js  406
-rw-r--r--  jstests/noPassthrough/server_transaction_metrics_for_prepared_transactions.js  342
-rw-r--r--  jstests/noPassthrough/server_transaction_metrics_kill_sessions.js  157
-rw-r--r--  jstests/noPassthrough/server_transaction_metrics_secondary.js  121
-rw-r--r--  jstests/noPassthrough/server_write_concern_metrics.js  411
-rw-r--r--  jstests/noPassthrough/session_w0.js  23
-rw-r--r--  jstests/noPassthrough/sessions_collection_auto_healing.js  111
-rw-r--r--  jstests/noPassthrough/set_step_params.js  462
-rw-r--r--  jstests/noPassthrough/setshellparameter.js  26
-rw-r--r--  jstests/noPassthrough/shard_fixture_selftest.js  74
-rw-r--r--  jstests/noPassthrough/shell_appname_uri.js  124
-rw-r--r--  jstests/noPassthrough/shell_can_retry_writes.js  252
-rw-r--r--  jstests/noPassthrough/shell_can_use_read_concern.js  361
-rw-r--r--  jstests/noPassthrough/shell_check_program_extension.js  18
-rw-r--r--  jstests/noPassthrough/shell_cmd_assertions.js  655
-rw-r--r--  jstests/noPassthrough/shell_disable_majority_reads.js  48
-rw-r--r--  jstests/noPassthrough/shell_gossip_cluster_time.js  215
-rw-r--r--  jstests/noPassthrough/shell_helper_use_database.js  42
-rw-r--r--  jstests/noPassthrough/shell_history.js  171
-rw-r--r--  jstests/noPassthrough/shell_interactive.js  35
-rw-r--r--  jstests/noPassthrough/shell_load_file.js  60
-rw-r--r--  jstests/noPassthrough/shell_mongobridge_port_allocation.js  113
-rw-r--r--  jstests/noPassthrough/shell_quit.js  26
-rw-r--r--  jstests/noPassthrough/shell_retry_writes_on_retryable_errors.js  216
-rw-r--r--  jstests/noPassthrough/shell_retry_writes_uri.js  252
-rw-r--r--  jstests/noPassthrough/shell_session_option_defaults.js  120
-rw-r--r--  jstests/noPassthrough/shutdown_while_fsync_locked.js  14
-rw-r--r--  jstests/noPassthrough/skip_sharding_configuration_checks.js  93
-rw-r--r--  jstests/noPassthrough/skip_write_conflict_retries_failpoint.js  82
-rw-r--r--  jstests/noPassthrough/snapshotWindow_serverParameters.js  144
-rw-r--r--  jstests/noPassthrough/snapshot_cursor_integrity.js  302
-rw-r--r--  jstests/noPassthrough/snapshot_cursor_shutdown_stepdown.js  159
-rw-r--r--  jstests/noPassthrough/snapshot_reads.js  228
-rw-r--r--  jstests/noPassthrough/socket_disconnect_kills.js  402
-rw-r--r--  jstests/noPassthrough/standalone_replication_recovery.js  295
-rw-r--r--  jstests/noPassthrough/start_session_command.js  150
-rw-r--r--  jstests/noPassthrough/startup_logging.js  55
-rw-r--r--  jstests/noPassthrough/step_down_during_drop_database.js  76
-rw-r--r--  jstests/noPassthrough/stepdown_query.js  122
-rw-r--r--  jstests/noPassthrough/sync_write.js  38
-rw-r--r--  jstests/noPassthrough/system_indexes.js  136
-rw-r--r--  jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js  35
-rw-r--r--  jstests/noPassthrough/thread_args.js  72
-rw-r--r--  jstests/noPassthrough/timestamp_index_builds.js  128
-rw-r--r--  jstests/noPassthrough/traffic_reading.js  134
-rw-r--r--  jstests/noPassthrough/traffic_reading_legacy.js  108
-rw-r--r--  jstests/noPassthrough/traffic_recording.js  211
-rw-r--r--  jstests/noPassthrough/transactionLifetimeLimitSeconds_serverParameter.js  26
-rw-r--r--  jstests/noPassthrough/transaction_reaper.js  307
-rw-r--r--  jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js  88
-rw-r--r--  jstests/noPassthrough/transactions_work_with_in_memory_engine.js  48
-rw-r--r--  jstests/noPassthrough/ttlMonitorSleepSecs_parameter.js  25
-rw-r--r--  jstests/noPassthrough/ttl_capped.js  128
-rw-r--r--  jstests/noPassthrough/ttl_partial_index.js  46
-rw-r--r--  jstests/noPassthrough/two_phase_index_build.js  106
-rw-r--r--  jstests/noPassthrough/two_phase_index_build_ops_disabled_through_applyops.js  83
-rw-r--r--  jstests/noPassthrough/txn_override_causal_consistency.js  376
-rw-r--r--  jstests/noPassthrough/umask.js  122
-rw-r--r--  jstests/noPassthrough/unix_socket.js  200
-rw-r--r--  jstests/noPassthrough/unknown-set-parameter.js  55
-rw-r--r--  jstests/noPassthrough/unsupported_change_stream_deployments.js  101
-rw-r--r--  jstests/noPassthrough/update_now_clustertime_replset.js  488
-rw-r--r--  jstests/noPassthrough/update_now_clustertime_sharding.js  538
-rw-r--r--  jstests/noPassthrough/update_post_image_validation.js  40
-rw-r--r--  jstests/noPassthrough/update_server-5552.js  54
-rw-r--r--  jstests/noPassthrough/upsert_duplicate_key_retry.js  152
-rw-r--r--  jstests/noPassthrough/use_disk.js  280
-rw-r--r--  jstests/noPassthrough/utf8_paths.js  52
-rw-r--r--  jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js  360
-rw-r--r--  jstests/noPassthrough/verify_session_cache_updates.js  112
-rw-r--r--  jstests/noPassthrough/verify_sessions_expiration.js  232
-rw-r--r--  jstests/noPassthrough/view_catalog_deadlock_with_rename.js  38
-rw-r--r--  jstests/noPassthrough/views_legacy.js  155
-rw-r--r--  jstests/noPassthrough/wiredTigerMaxCacheOverflowSizeGB_serverParameter.js  24
-rw-r--r--  jstests/noPassthrough/write_conflict_wildcard.js  50
-rw-r--r--  jstests/noPassthrough/write_local.js  71
-rw-r--r--  jstests/noPassthrough/wt_cache_full.js  97
-rw-r--r--  jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js  110
-rw-r--r--  jstests/noPassthrough/wt_cache_full_restart.js  108
-rw-r--r--  jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js  108
-rw-r--r--  jstests/noPassthrough/wt_disable_majority_reads.js  50
-rw-r--r--  jstests/noPassthrough/wt_index_option_defaults.js  265
-rw-r--r--  jstests/noPassthrough/wt_malformed_creation_string.js  102
-rw-r--r--  jstests/noPassthrough/wt_nojournal_skip_recovery.js  154
-rw-r--r--  jstests/noPassthrough/wt_nojournal_toggle.js  228
-rw-r--r--  jstests/noPassthrough/wt_operation_stats.js  148
-rw-r--r--  jstests/noPassthrough/wt_prepare_conflict.js  102
-rw-r--r--  jstests/noPassthrough/wt_skip_prepare_conflicts_retries_failpoint.js  85
-rw-r--r--  jstests/noPassthrough/wt_unclean_shutdown.js  224
-rw-r--r--  jstests/noPassthrough/yield_during_writes.js  78
315 files changed, 23669 insertions, 23977 deletions
diff --git a/jstests/noPassthrough/abandon_snapshot_for_each_collection_from_db.js b/jstests/noPassthrough/abandon_snapshot_for_each_collection_from_db.js
index 432ee93eaa7..55aeff8b8bf 100644
--- a/jstests/noPassthrough/abandon_snapshot_for_each_collection_from_db.js
+++ b/jstests/noPassthrough/abandon_snapshot_for_each_collection_from_db.js
@@ -9,40 +9,39 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
- const db = rst.getPrimary().getDB(dbName);
- assert.commandWorked(db.createCollection(collName));
+const db = rst.getPrimary().getDB(dbName);
+assert.commandWorked(db.createCollection(collName));
- const failpoint = 'hangBeforeGettingNextCollection';
+const failpoint = 'hangBeforeGettingNextCollection';
- // Hang 'forEachCollectionFromDb' after iterating through the first collection.
- assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+// Hang 'forEachCollectionFromDb' after iterating through the first collection.
+assert.commandWorked(db.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
- TestData.failpoint = failpoint;
- const awaitCreateCollections = startParallelShell(() => {
- // The 'forEachCollectionFromDb' helper doesn't iterate in collection name order, so we need
- // to insert multiple collections to have at least one next collection when the
- // CollectionCatalog iterator is incremented.
- for (let i = 0; i < 25; i++) {
- const collName = "a".repeat(i + 1);
- assert.commandWorked(db.createCollection(collName));
- }
+TestData.failpoint = failpoint;
+const awaitCreateCollections = startParallelShell(() => {
+ // The 'forEachCollectionFromDb' helper doesn't iterate in collection name order, so we need
+ // to insert multiple collections to have at least one next collection when the
+ // CollectionCatalog iterator is incremented.
+ for (let i = 0; i < 25; i++) {
+ const collName = "a".repeat(i + 1);
+ assert.commandWorked(db.createCollection(collName));
+ }
- // Let 'forEachCollectionFromDb' iterate to the next collection.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: TestData.failpoint, mode: "off"}));
- }, rst.getPrimary().port);
+ // Let 'forEachCollectionFromDb' iterate to the next collection.
+ assert.commandWorked(db.adminCommand({configureFailPoint: TestData.failpoint, mode: "off"}));
+}, rst.getPrimary().port);
- assert.commandWorked(db.stats());
- awaitCreateCollections();
+assert.commandWorked(db.stats());
+awaitCreateCollections();
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/absent_ns_field_in_index_specs.js b/jstests/noPassthrough/absent_ns_field_in_index_specs.js
index 07477fdd1e4..4428415e1b3 100644
--- a/jstests/noPassthrough/absent_ns_field_in_index_specs.js
+++ b/jstests/noPassthrough/absent_ns_field_in_index_specs.js
@@ -9,67 +9,70 @@
* @tags: [requires_replication, requires_persistence]
*/
(function() {
- 'use strict';
+'use strict';
- const dbName = 'test';
- const collName = 'absent_ns';
+const dbName = 'test';
+const collName = 'absent_ns';
- let replSet = new ReplSetTest({name: 'absentNsField', nodes: 2});
- replSet.startSet();
- replSet.initiate();
+let replSet = new ReplSetTest({name: 'absentNsField', nodes: 2});
+replSet.startSet();
+replSet.initiate();
- const primary = replSet.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const primaryColl = primaryDB.getCollection(collName);
+const primary = replSet.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB.getCollection(collName);
- const secondary = replSet.getSecondary();
- const secondaryDB = secondary.getDB(dbName);
+const secondary = replSet.getSecondary();
+const secondaryDB = secondary.getDB(dbName);
- // The primary will not generate the 'ns' field for index specs, but the secondary will.
- assert.commandWorked(primary.getDB('admin').runCommand(
- {setParameter: 1, disableIndexSpecNamespaceGeneration: 1}));
+// The primary will not generate the 'ns' field for index specs, but the secondary will.
+assert.commandWorked(
+ primary.getDB('admin').runCommand({setParameter: 1, disableIndexSpecNamespaceGeneration: 1}));
- assert.commandWorked(primaryColl.insert({x: 100}));
- assert.commandWorked(primaryColl.createIndex({x: 1}));
+assert.commandWorked(primaryColl.insert({x: 100}));
+assert.commandWorked(primaryColl.createIndex({x: 1}));
- replSet.awaitReplication();
+replSet.awaitReplication();
- let specPrimary =
- assert.commandWorked(primaryDB.runCommand({listIndexes: collName})).cursor.firstBatch[1];
- let specSecondary =
- assert.commandWorked(secondaryDB.runCommand({listIndexes: collName})).cursor.firstBatch[1];
+let specPrimary =
+ assert.commandWorked(primaryDB.runCommand({listIndexes: collName})).cursor.firstBatch[1];
+let specSecondary =
+ assert.commandWorked(secondaryDB.runCommand({listIndexes: collName})).cursor.firstBatch[1];
- assert.eq(false, specPrimary.hasOwnProperty('ns'));
- assert.eq(true, specSecondary.hasOwnProperty('ns'));
- assert.eq(dbName + '.' + collName, specSecondary.ns);
+assert.eq(false, specPrimary.hasOwnProperty('ns'));
+assert.eq(true, specSecondary.hasOwnProperty('ns'));
+assert.eq(dbName + '.' + collName, specSecondary.ns);
- replSet.stopSet(/*signal=*/null, /*forRestart=*/true);
+replSet.stopSet(/*signal=*/null, /*forRestart=*/true);
- // The primaries index spec has no 'ns' field and the secondaries index spec does have the 'ns'
- // field. Restart the nodes as standalone and ensure that the primaries index spec gets updated
- // with the 'ns' field. No changes should be necessary to the secondaries index spec, but
- // verify that it still has the 'ns' field.
- const options = {dbpath: primary.dbpath, noCleanData: true};
- let conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
+// The primaries index spec has no 'ns' field and the secondaries index spec does have the 'ns'
+// field. Restart the nodes as standalone and ensure that the primaries index spec gets updated
+// with the 'ns' field. No changes should be necessary to the secondaries index spec, but
+// verify that it still has the 'ns' field.
+const options = {
+ dbpath: primary.dbpath,
+ noCleanData: true
+};
+let conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
- let db = conn.getDB(dbName);
- let spec = assert.commandWorked(db.runCommand({listIndexes: collName})).cursor.firstBatch[1];
+let db = conn.getDB(dbName);
+let spec = assert.commandWorked(db.runCommand({listIndexes: collName})).cursor.firstBatch[1];
- assert.eq(true, spec.hasOwnProperty('ns'));
- assert.eq(dbName + '.' + collName, spec.ns);
+assert.eq(true, spec.hasOwnProperty('ns'));
+assert.eq(dbName + '.' + collName, spec.ns);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
- options.dbpath = secondary.dbpath;
- conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
+options.dbpath = secondary.dbpath;
+conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
- db = conn.getDB(dbName);
- spec = assert.commandWorked(db.runCommand({listIndexes: collName})).cursor.firstBatch[1];
+db = conn.getDB(dbName);
+spec = assert.commandWorked(db.runCommand({listIndexes: collName})).cursor.firstBatch[1];
- assert.eq(true, spec.hasOwnProperty('ns'));
- assert.eq(dbName + '.' + collName, spec.ns);
+assert.eq(true, spec.hasOwnProperty('ns'));
+assert.eq(dbName + '.' + collName, spec.ns);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/afterClusterTime_committed_reads.js b/jstests/noPassthrough/afterClusterTime_committed_reads.js
index 5c488ca69f4..5212c59f6f5 100644
--- a/jstests/noPassthrough/afterClusterTime_committed_reads.js
+++ b/jstests/noPassthrough/afterClusterTime_committed_reads.js
@@ -2,73 +2,72 @@
// majority commit point to move past 'afterClusterTime' before they can commit.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
+load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
- const session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- const primaryDB = session.getDatabase(dbName);
+const session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+const primaryDB = session.getDatabase(dbName);
- let txnNumber = 0;
+let txnNumber = 0;
- function testReadConcernLevel(level) {
- // Stop replication.
- stopReplicationOnSecondaries(rst);
+function testReadConcernLevel(level) {
+ // Stop replication.
+ stopReplicationOnSecondaries(rst);
- // Perform a write and get its op time.
- const res = assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{}]}));
- assert(res.hasOwnProperty("opTime"), tojson(res));
- assert(res.opTime.hasOwnProperty("ts"), tojson(res));
- const clusterTime = res.opTime.ts;
+ // Perform a write and get its op time.
+ const res = assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{}]}));
+ assert(res.hasOwnProperty("opTime"), tojson(res));
+ assert(res.opTime.hasOwnProperty("ts"), tojson(res));
+ const clusterTime = res.opTime.ts;
- // A majority-committed read-only transaction on the primary after the new cluster time
- // should time out at commit time waiting for the cluster time to be majority committed.
- assert.commandWorked(primaryDB.runCommand({
- find: collName,
- txnNumber: NumberLong(++txnNumber),
- startTransaction: true,
- autocommit: false,
- readConcern: {level: level, afterClusterTime: clusterTime}
- }));
- assert.commandFailedWithCode(primaryDB.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: "majority"},
- maxTimeMS: 1000
- }),
- ErrorCodes.MaxTimeMSExpired);
+ // A majority-committed read-only transaction on the primary after the new cluster time
+ // should time out at commit time waiting for the cluster time to be majority committed.
+ assert.commandWorked(primaryDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(++txnNumber),
+ startTransaction: true,
+ autocommit: false,
+ readConcern: {level: level, afterClusterTime: clusterTime}
+ }));
+ assert.commandFailedWithCode(primaryDB.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"},
+ maxTimeMS: 1000
+ }),
+ ErrorCodes.MaxTimeMSExpired);
- // Restart replication.
- restartReplicationOnSecondaries(rst);
+ // Restart replication.
+ restartReplicationOnSecondaries(rst);
- // A majority-committed read-only transaction on the primary after the new cluster time now
- // succeeds.
- assert.commandWorked(primaryDB.runCommand({
- find: collName,
- txnNumber: NumberLong(++txnNumber),
- startTransaction: true,
- autocommit: false,
- readConcern: {level: level, afterClusterTime: clusterTime}
- }));
- assert.commandWorked(primaryDB.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
- }
+ // A majority-committed read-only transaction on the primary after the new cluster time now
+ // succeeds.
+ assert.commandWorked(primaryDB.runCommand({
+ find: collName,
+ txnNumber: NumberLong(++txnNumber),
+ startTransaction: true,
+ autocommit: false,
+ readConcern: {level: level, afterClusterTime: clusterTime}
+ }));
+ assert.commandWorked(primaryDB.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+ }));
+}
- testReadConcernLevel("majority");
- testReadConcernLevel("snapshot");
+testReadConcernLevel("majority");
+testReadConcernLevel("snapshot");
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/after_cluster_time.js b/jstests/noPassthrough/after_cluster_time.js
index 1137e8495f2..78485d10a31 100644
--- a/jstests/noPassthrough/after_cluster_time.js
+++ b/jstests/noPassthrough/after_cluster_time.js
@@ -1,71 +1,71 @@
// This test verifies readConcern:afterClusterTime behavior on a standalone mongod.
// @tags: [requires_replication, requires_majority_read_concern]
(function() {
- "use strict";
- var standalone =
- MongoRunner.runMongod({enableMajorityReadConcern: "", storageEngine: "wiredTiger"});
+"use strict";
+var standalone =
+ MongoRunner.runMongod({enableMajorityReadConcern: "", storageEngine: "wiredTiger"});
- var testDB = standalone.getDB("test");
+var testDB = standalone.getDB("test");
- assert.commandWorked(testDB.runCommand({insert: "after_cluster_time", documents: [{x: 1}]}));
+assert.commandWorked(testDB.runCommand({insert: "after_cluster_time", documents: [{x: 1}]}));
- // Majority reads without afterClusterTime succeed.
- assert.commandWorked(
- testDB.runCommand({find: "after_cluster_time", readConcern: {level: "majority"}}),
- "expected majority read without afterClusterTime to succeed on standalone mongod");
+// Majority reads without afterClusterTime succeed.
+assert.commandWorked(
+ testDB.runCommand({find: "after_cluster_time", readConcern: {level: "majority"}}),
+ "expected majority read without afterClusterTime to succeed on standalone mongod");
- // afterClusterTime reads without a level fail.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {find: "after_cluster_time", readConcern: {afterClusterTime: Timestamp(0, 0)}}),
- ErrorCodes.InvalidOptions,
- "expected non-majority afterClusterTime read to fail on standalone mongod");
+// afterClusterTime reads without a level fail.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {find: "after_cluster_time", readConcern: {afterClusterTime: Timestamp(0, 0)}}),
+ ErrorCodes.InvalidOptions,
+ "expected non-majority afterClusterTime read to fail on standalone mongod");
- // afterClusterTime reads with null timestamps are rejected.
- assert.commandFailedWithCode(
- testDB.runCommand({
- find: "after_cluster_time",
- readConcern: {level: "majority", afterClusterTime: Timestamp(0, 0)}
- }),
- ErrorCodes.InvalidOptions,
- "expected afterClusterTime read with null timestamp to fail on standalone mongod");
-
- // Standalones don't support any operations with clusterTime.
- assert.commandFailedWithCode(testDB.runCommand({
+// afterClusterTime reads with null timestamps are rejected.
+assert.commandFailedWithCode(
+ testDB.runCommand({
find: "after_cluster_time",
- readConcern: {level: "majority", afterClusterTime: Timestamp(0, 1)}
+ readConcern: {level: "majority", afterClusterTime: Timestamp(0, 0)}
}),
- ErrorCodes.IllegalOperation,
- "expected afterClusterTime read to fail on standalone mongod");
- MongoRunner.stopMongod(standalone);
+ ErrorCodes.InvalidOptions,
+ "expected afterClusterTime read with null timestamp to fail on standalone mongod");
+
+// Standalones don't support any operations with clusterTime.
+assert.commandFailedWithCode(testDB.runCommand({
+ find: "after_cluster_time",
+ readConcern: {level: "majority", afterClusterTime: Timestamp(0, 1)}
+}),
+ ErrorCodes.IllegalOperation,
+ "expected afterClusterTime read to fail on standalone mongod");
+MongoRunner.stopMongod(standalone);
- var rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- var adminDBRS = rst.getPrimary().getDB("admin");
+var rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+var adminDBRS = rst.getPrimary().getDB("admin");
- var res = adminDBRS.runCommand({ping: 1});
- assert.commandWorked(res);
- assert(res.hasOwnProperty("$clusterTime"), tojson(res));
- assert(res.$clusterTime.hasOwnProperty("clusterTime"), tojson(res));
- var clusterTime = res.$clusterTime.clusterTime;
- // afterClusterTime is not allowed in ping command.
- assert.commandFailedWithCode(
- adminDBRS.runCommand({ping: 1, readConcern: {afterClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions,
- "expected afterClusterTime fail in ping");
+var res = adminDBRS.runCommand({ping: 1});
+assert.commandWorked(res);
+assert(res.hasOwnProperty("$clusterTime"), tojson(res));
+assert(res.$clusterTime.hasOwnProperty("clusterTime"), tojson(res));
+var clusterTime = res.$clusterTime.clusterTime;
+// afterClusterTime is not allowed in ping command.
+assert.commandFailedWithCode(
+ adminDBRS.runCommand({ping: 1, readConcern: {afterClusterTime: clusterTime}}),
+ ErrorCodes.InvalidOptions,
+ "expected afterClusterTime fail in ping");
- // afterClusterTime is not allowed in serverStatus command.
- assert.commandFailedWithCode(
- adminDBRS.runCommand({serverStatus: 1, readConcern: {afterClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions,
- "expected afterClusterTime fail in serverStatus");
+// afterClusterTime is not allowed in serverStatus command.
+assert.commandFailedWithCode(
+ adminDBRS.runCommand({serverStatus: 1, readConcern: {afterClusterTime: clusterTime}}),
+ ErrorCodes.InvalidOptions,
+ "expected afterClusterTime fail in serverStatus");
- // afterClusterTime is not allowed in currentOp command.
- assert.commandFailedWithCode(
- adminDBRS.runCommand({currentOp: 1, readConcern: {afterClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions,
- "expected afterClusterTime fail in serverStatus");
+// afterClusterTime is not allowed in currentOp command.
+assert.commandFailedWithCode(
+ adminDBRS.runCommand({currentOp: 1, readConcern: {afterClusterTime: clusterTime}}),
+ ErrorCodes.InvalidOptions,
+ "expected afterClusterTime fail in serverStatus");
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/agg_explain_read_concern.js b/jstests/noPassthrough/agg_explain_read_concern.js
index e3f0d7b8d94..9d386973450 100644
--- a/jstests/noPassthrough/agg_explain_read_concern.js
+++ b/jstests/noPassthrough/agg_explain_read_concern.js
@@ -3,69 +3,69 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const rst = new ReplSetTest(
- {name: "aggExplainReadConcernSet", nodes: 1, nodeOptions: {enableMajorityReadConcern: ""}});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest(
+ {name: "aggExplainReadConcernSet", nodes: 1, nodeOptions: {enableMajorityReadConcern: ""}});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const session = primary.getDB("test").getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase("test");
- const coll = sessionDB.agg_explain_read_concern;
+const primary = rst.getPrimary();
+const session = primary.getDB("test").getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase("test");
+const coll = sessionDB.agg_explain_read_concern;
- // Test that explain is legal with readConcern "local".
- assert.commandWorked(coll.explain().aggregate([], {readConcern: {level: "local"}}));
- assert.commandWorked(sessionDB.runCommand(
- {aggregate: coll.getName(), pipeline: [], explain: true, readConcern: {level: "local"}}));
- assert.commandWorked(sessionDB.runCommand({
- explain: {aggregate: coll.getName(), pipeline: [], cursor: {}},
- readConcern: {level: "local"}
- }));
+// Test that explain is legal with readConcern "local".
+assert.commandWorked(coll.explain().aggregate([], {readConcern: {level: "local"}}));
+assert.commandWorked(sessionDB.runCommand(
+ {aggregate: coll.getName(), pipeline: [], explain: true, readConcern: {level: "local"}}));
+assert.commandWorked(sessionDB.runCommand({
+ explain: {aggregate: coll.getName(), pipeline: [], cursor: {}},
+ readConcern: {level: "local"}
+}));
- // Test that explain is illegal with other readConcern levels.
- const nonLocalReadConcerns = ["majority", "available", "linearizable"];
- nonLocalReadConcerns.forEach(function(readConcernLevel) {
- let aggCmd = {
- aggregate: coll.getName(),
- pipeline: [],
- explain: true,
- readConcern: {level: readConcernLevel}
- };
- let explainCmd = {
- explain: {aggregate: coll.getName(), pipeline: [], cursor: {}},
- readConcern: {level: readConcernLevel}
- };
+// Test that explain is illegal with other readConcern levels.
+const nonLocalReadConcerns = ["majority", "available", "linearizable"];
+nonLocalReadConcerns.forEach(function(readConcernLevel) {
+ let aggCmd = {
+ aggregate: coll.getName(),
+ pipeline: [],
+ explain: true,
+ readConcern: {level: readConcernLevel}
+ };
+ let explainCmd = {
+ explain: {aggregate: coll.getName(), pipeline: [], cursor: {}},
+ readConcern: {level: readConcernLevel}
+ };
- assert.throws(() => coll.explain().aggregate([], {readConcern: {level: readConcernLevel}}));
+ assert.throws(() => coll.explain().aggregate([], {readConcern: {level: readConcernLevel}}));
- let cmdRes = sessionDB.runCommand(aggCmd);
- assert.commandFailedWithCode(cmdRes, ErrorCodes.InvalidOptions, tojson(cmdRes));
- let expectedErrStr = "aggregate command cannot run with a readConcern other than 'local'";
- assert.neq(cmdRes.errmsg.indexOf(expectedErrStr), -1, tojson(cmdRes));
+ let cmdRes = sessionDB.runCommand(aggCmd);
+ assert.commandFailedWithCode(cmdRes, ErrorCodes.InvalidOptions, tojson(cmdRes));
+ let expectedErrStr = "aggregate command cannot run with a readConcern other than 'local'";
+ assert.neq(cmdRes.errmsg.indexOf(expectedErrStr), -1, tojson(cmdRes));
- cmdRes = sessionDB.runCommand(explainCmd);
- assert.commandFailedWithCode(cmdRes, ErrorCodes.InvalidOptions, tojson(cmdRes));
- expectedErrStr = "Command does not support read concern";
- assert.neq(cmdRes.errmsg.indexOf(expectedErrStr), -1, tojson(cmdRes));
- });
+ cmdRes = sessionDB.runCommand(explainCmd);
+ assert.commandFailedWithCode(cmdRes, ErrorCodes.InvalidOptions, tojson(cmdRes));
+ expectedErrStr = "Command does not support read concern";
+ assert.neq(cmdRes.errmsg.indexOf(expectedErrStr), -1, tojson(cmdRes));
+});
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/aggregation_cursor_invalidations.js b/jstests/noPassthrough/aggregation_cursor_invalidations.js
index 387fbe56952..7192e9595bc 100644
--- a/jstests/noPassthrough/aggregation_cursor_invalidations.js
+++ b/jstests/noPassthrough/aggregation_cursor_invalidations.js
@@ -10,114 +10,115 @@
* @tags: [do_not_wrap_aggregations_in_facets, requires_capped]
*/
(function() {
- 'use strict';
-
- // This test runs a getMore in a parallel shell, which will not inherit the implicit session of
- // the cursor establishing command.
- TestData.disableImplicitSessions = true;
-
- // The DocumentSourceCursor which wraps PlanExecutors will batch results internally. We use the
- // 'internalDocumentSourceCursorBatchSizeBytes' parameter to disable this behavior so that we
- // can easily pause a pipeline in a state where it will need to request more results from the
- // PlanExecutor.
- const options = {setParameter: 'internalDocumentSourceCursorBatchSizeBytes=1'};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
-
- const testDB = conn.getDB('test');
-
- // Make sure the number of results is greater than the batchSize to ensure the results
- // cannot all fit in one batch.
- const batchSize = 2;
- const numMatches = batchSize + 1;
- const sourceCollection = testDB.source;
- const foreignCollection = testDB.foreign;
-
- /**
- * Populates both 'sourceCollection' and 'foreignCollection' with values of 'local' and
- * 'foreign' in the range [0, 'numMatches').
- */
- function setup() {
- sourceCollection.drop();
- foreignCollection.drop();
- for (let i = 0; i < numMatches; ++i) {
- assert.writeOK(sourceCollection.insert({_id: i, local: i}));
-
- // We want to be able to pause a $lookup stage in a state where it has returned some but
- // not all of the results for a single lookup, so we need to insert at least
- // 'numMatches' matches for each source document.
- for (let j = 0; j < numMatches; ++j) {
- assert.writeOK(foreignCollection.insert({_id: numMatches * i + j, foreign: i}));
- }
- }
- }
-
- // Check that there are no cursors still open on the source collection. If any are found, the
- // test will fail and print a list of idle cursors. This should be called each time we
- // expect a cursor to have been destroyed.
- function assertNoOpenCursorsOnSourceCollection() {
- const cursors =
- testDB.getSiblingDB("admin")
- .aggregate([
- {"$currentOp": {"idleCursors": true}},
- {
- "$match": {ns: sourceCollection.getFullName(), "type": "idleCursor"}
-
- }
- ])
- .toArray();
- assert.eq(
- cursors.length, 0, "Did not expect to find any cursors, but found " + tojson(cursors));
- }
-
- const defaultAggregateCmdSmallBatch = {
- aggregate: sourceCollection.getName(),
- pipeline: [],
- cursor: {
- batchSize: batchSize,
- },
- };
-
- // Test that dropping the source collection between an aggregate and a getMore will cause an
- // aggregation pipeline to fail during the getMore if it needs to fetch more results from the
- // collection.
- setup();
- let res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
-
- sourceCollection.drop();
-
- let getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.QueryPlanKilled,
- 'expected getMore to fail because the source collection was dropped');
-
- // Make sure the cursors were cleaned up.
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that dropping the source collection between an aggregate and a getMore will *not* cause
- // an aggregation pipeline to fail during the getMore if it *does not need* to fetch more
- // results from the collection.
- setup();
- res = assert.commandWorked(testDB.runCommand({
- aggregate: sourceCollection.getName(),
- pipeline: [{$sort: {x: 1}}],
- cursor: {
- batchSize: batchSize,
- },
- }));
+'use strict';
+
+// This test runs a getMore in a parallel shell, which will not inherit the implicit session of
+// the cursor establishing command.
+TestData.disableImplicitSessions = true;
+
+// The DocumentSourceCursor which wraps PlanExecutors will batch results internally. We use the
+// 'internalDocumentSourceCursorBatchSizeBytes' parameter to disable this behavior so that we
+// can easily pause a pipeline in a state where it will need to request more results from the
+// PlanExecutor.
+const options = {
+ setParameter: 'internalDocumentSourceCursorBatchSizeBytes=1'
+};
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, 'mongod was unable to start up with options: ' + tojson(options));
+
+const testDB = conn.getDB('test');
+
+// Make sure the number of results is greater than the batchSize to ensure the results
+// cannot all fit in one batch.
+const batchSize = 2;
+const numMatches = batchSize + 1;
+const sourceCollection = testDB.source;
+const foreignCollection = testDB.foreign;
+/**
+ * Populates both 'sourceCollection' and 'foreignCollection' with values of 'local' and
+ * 'foreign' in the range [0, 'numMatches').
+ */
+function setup() {
sourceCollection.drop();
-
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- assert.commandWorked(testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}));
-
- // Test that dropping a $lookup stage's foreign collection between an aggregate and a getMore
- // will *not* cause an aggregation pipeline to fail during the getMore if it needs to fetch more
- // results from the foreign collection. It will instead return no matches for subsequent
- // lookups, as if the foreign collection was empty.
- setup();
- res = assert.commandWorked(testDB.runCommand({
+ foreignCollection.drop();
+ for (let i = 0; i < numMatches; ++i) {
+ assert.writeOK(sourceCollection.insert({_id: i, local: i}));
+
+ // We want to be able to pause a $lookup stage in a state where it has returned some but
+ // not all of the results for a single lookup, so we need to insert at least
+ // 'numMatches' matches for each source document.
+ for (let j = 0; j < numMatches; ++j) {
+ assert.writeOK(foreignCollection.insert({_id: numMatches * i + j, foreign: i}));
+ }
+ }
+}
+
+// Check that there are no cursors still open on the source collection. If any are found, the
+// test will fail and print a list of idle cursors. This should be called each time we
+// expect a cursor to have been destroyed.
+function assertNoOpenCursorsOnSourceCollection() {
+ const cursors = testDB.getSiblingDB("admin")
+ .aggregate([
+ {"$currentOp": {"idleCursors": true}},
+ {
+ "$match": {ns: sourceCollection.getFullName(), "type": "idleCursor"}
+
+ }
+ ])
+ .toArray();
+ assert.eq(
+ cursors.length, 0, "Did not expect to find any cursors, but found " + tojson(cursors));
+}
+
+const defaultAggregateCmdSmallBatch = {
+ aggregate: sourceCollection.getName(),
+ pipeline: [],
+ cursor: {
+ batchSize: batchSize,
+ },
+};
+
+// Test that dropping the source collection between an aggregate and a getMore will cause an
+// aggregation pipeline to fail during the getMore if it needs to fetch more results from the
+// collection.
+setup();
+let res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+
+sourceCollection.drop();
+
+let getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.QueryPlanKilled,
+ 'expected getMore to fail because the source collection was dropped');
+
+// Make sure the cursors were cleaned up.
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that dropping the source collection between an aggregate and a getMore will *not* cause
+// an aggregation pipeline to fail during the getMore if it *does not need* to fetch more
+// results from the collection.
+setup();
+res = assert.commandWorked(testDB.runCommand({
+ aggregate: sourceCollection.getName(),
+ pipeline: [{$sort: {x: 1}}],
+ cursor: {
+ batchSize: batchSize,
+ },
+}));
+
+sourceCollection.drop();
+
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+assert.commandWorked(testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}));
+
+// Test that dropping a $lookup stage's foreign collection between an aggregate and a getMore
+// will *not* cause an aggregation pipeline to fail during the getMore if it needs to fetch more
+// results from the foreign collection. It will instead return no matches for subsequent
+// lookups, as if the foreign collection was empty.
+setup();
+res = assert.commandWorked(testDB.runCommand({
aggregate: sourceCollection.getName(),
pipeline: [
{
@@ -134,25 +135,25 @@
},
}));
- foreignCollection.drop();
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
- assert.commandWorked(
- res, 'expected getMore to succeed despite the foreign collection being dropped');
- res.cursor.nextBatch.forEach(function(aggResult) {
- assert.eq(aggResult.results,
- [],
- 'expected results of $lookup into non-existent collection to be empty');
- });
-
- // Make sure the cursors were cleaned up.
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that a $lookup stage will properly clean up its cursor if it becomes invalidated between
- // batches of a single lookup. This is the same scenario as above, but with the $lookup stage
- // left in a state where it has returned some but not all of the matches for a single lookup.
- setup();
- res = assert.commandWorked(testDB.runCommand({
+foreignCollection.drop();
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
+assert.commandWorked(res,
+ 'expected getMore to succeed despite the foreign collection being dropped');
+res.cursor.nextBatch.forEach(function(aggResult) {
+ assert.eq(aggResult.results,
+ [],
+ 'expected results of $lookup into non-existent collection to be empty');
+});
+
+// Make sure the cursors were cleaned up.
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that a $lookup stage will properly clean up its cursor if it becomes invalidated between
+// batches of a single lookup. This is the same scenario as above, but with the $lookup stage
+// left in a state where it has returned some but not all of the matches for a single lookup.
+setup();
+res = assert.commandWorked(testDB.runCommand({
aggregate: sourceCollection.getName(),
pipeline: [
{
@@ -172,22 +173,22 @@
},
}));
- foreignCollection.drop();
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.QueryPlanKilled,
- 'expected getMore to fail because the foreign collection was dropped');
-
- // Make sure the cursors were cleaned up.
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that dropping a $graphLookup stage's foreign collection between an aggregate and a
- // getMore will *not* cause an aggregation pipeline to fail during the getMore if it needs to
- // fetch more results from the foreign collection. It will instead return no matches for
- // subsequent lookups, as if the foreign collection was empty.
- setup();
- res = assert.commandWorked(testDB.runCommand({
+foreignCollection.drop();
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.QueryPlanKilled,
+ 'expected getMore to fail because the foreign collection was dropped');
+
+// Make sure the cursors were cleaned up.
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that dropping a $graphLookup stage's foreign collection between an aggregate and a
+// getMore will *not* cause an aggregation pipeline to fail during the getMore if it needs to
+// fetch more results from the foreign collection. It will instead return no matches for
+// subsequent lookups, as if the foreign collection was empty.
+setup();
+res = assert.commandWorked(testDB.runCommand({
aggregate: sourceCollection.getName(),
pipeline: [
{
@@ -205,19 +206,19 @@
},
}));
- foreignCollection.drop();
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
- assert.commandWorked(
- res, 'expected getMore to succeed despite the foreign collection being dropped');
-
- // Make sure the cursors were cleaned up.
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that the getMore still succeeds if the $graphLookup is followed by an $unwind on the
- // 'as' field and the collection is dropped between the initial request and a getMore.
- setup();
- res = assert.commandWorked(testDB.runCommand({
+foreignCollection.drop();
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
+assert.commandWorked(res,
+ 'expected getMore to succeed despite the foreign collection being dropped');
+
+// Make sure the cursors were cleaned up.
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that the getMore still succeeds if the $graphLookup is followed by an $unwind on the
+// 'as' field and the collection is dropped between the initial request and a getMore.
+setup();
+res = assert.commandWorked(testDB.runCommand({
aggregate: sourceCollection.getName(),
pipeline: [
{
@@ -236,149 +237,146 @@
},
}));
- foreignCollection.drop();
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
- assert.commandWorked(
- res, 'expected getMore to succeed despite the foreign collection being dropped');
-
- // Make sure the cursors were cleaned up.
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that dropping the database will kill an aggregation's cursor, causing a subsequent
- // getMore to fail.
- setup();
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
-
- assert.commandWorked(sourceCollection.getDB().dropDatabase());
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
-
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.QueryPlanKilled,
- 'expected getMore to fail because the database was dropped');
-
- assertNoOpenCursorsOnSourceCollection();
-
- // Test that killing an aggregation's cursor by inserting enough documents to force a truncation
- // of a capped collection will cause a subsequent getMore to fail.
- sourceCollection.drop();
- foreignCollection.drop();
- const maxCappedSizeBytes = 64 * 1024;
- const maxNumDocs = 10;
- assert.commandWorked(testDB.runCommand({
- create: sourceCollection.getName(),
- capped: true,
- size: maxCappedSizeBytes,
- max: maxNumDocs
- }));
- // Fill up about half of the collection.
- for (let i = 0; i < maxNumDocs / 2; ++i) {
- assert.writeOK(sourceCollection.insert({_id: i}));
- }
- // Start an aggregation.
- assert.gt(maxNumDocs / 2, batchSize);
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
- // Insert enough to force a truncation.
- for (let i = maxNumDocs / 2; i < 2 * maxNumDocs; ++i) {
- assert.writeOK(sourceCollection.insert({_id: i}));
- }
- assert.eq(maxNumDocs, sourceCollection.count());
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.CappedPositionLost,
- 'expected getMore to fail because the capped collection was truncated');
-
- // Test that killing an aggregation's cursor via the killCursors command will cause a subsequent
- // getMore to fail.
- setup();
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
-
- const killCursorsNamespace = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- assert.commandWorked(
- testDB.runCommand({killCursors: killCursorsNamespace, cursors: [res.cursor.id]}));
-
- assertNoOpenCursorsOnSourceCollection();
-
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
-
- // Test that killing an aggregation's operation via the killOp command will cause a getMore to
- // fail.
- setup();
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
-
- // Use a failpoint to cause a getMore to hang indefinitely.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'waitAfterPinningCursorBeforeGetMoreBatch', mode: 'alwaysOn'}));
- const curOpFilter = {'command.getMore': res.cursor.id};
- assert.eq(0, testDB.currentOp(curOpFilter).inprog.length);
-
- getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
- const parallelShellCode = 'assert.commandFailedWithCode(db.getSiblingDB(\'' + testDB.getName() +
- '\').runCommand({getMore: ' + res.cursor.id.toString() + ', collection: \'' +
- getMoreCollName +
- '\'}), ErrorCodes.Interrupted, \'expected getMore command to be interrupted by killOp\');';
-
- // Start a getMore and wait for it to hang.
- const awaitParallelShell = startParallelShell(parallelShellCode, conn.port);
- assert.soon(function() {
- return assert.commandWorked(testDB.currentOp(curOpFilter)).inprog.length === 1;
- }, 'expected getMore operation to remain active');
-
- // Wait until we know the failpoint has been reached.
- assert.soon(function() {
- const filter = {"msg": "waitAfterPinningCursorBeforeGetMoreBatch"};
- return assert.commandWorked(testDB.currentOp(filter)).inprog.length === 1;
+foreignCollection.drop();
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+res = testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName});
+assert.commandWorked(res,
+ 'expected getMore to succeed despite the foreign collection being dropped');
+
+// Make sure the cursors were cleaned up.
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that dropping the database will kill an aggregation's cursor, causing a subsequent
+// getMore to fail.
+setup();
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+
+assert.commandWorked(sourceCollection.getDB().dropDatabase());
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.QueryPlanKilled,
+ 'expected getMore to fail because the database was dropped');
+
+assertNoOpenCursorsOnSourceCollection();
+
+// Test that killing an aggregation's cursor by inserting enough documents to force a truncation
+// of a capped collection will cause a subsequent getMore to fail.
+sourceCollection.drop();
+foreignCollection.drop();
+const maxCappedSizeBytes = 64 * 1024;
+const maxNumDocs = 10;
+assert.commandWorked(testDB.runCommand(
+ {create: sourceCollection.getName(), capped: true, size: maxCappedSizeBytes, max: maxNumDocs}));
+// Fill up about half of the collection.
+for (let i = 0; i < maxNumDocs / 2; ++i) {
+ assert.writeOK(sourceCollection.insert({_id: i}));
+}
+// Start an aggregation.
+assert.gt(maxNumDocs / 2, batchSize);
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+// Insert enough to force a truncation.
+for (let i = maxNumDocs / 2; i < 2 * maxNumDocs; ++i) {
+ assert.writeOK(sourceCollection.insert({_id: i}));
+}
+assert.eq(maxNumDocs, sourceCollection.count());
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.CappedPositionLost,
+ 'expected getMore to fail because the capped collection was truncated');
+
+// Test that killing an aggregation's cursor via the killCursors command will cause a subsequent
+// getMore to fail.
+setup();
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+
+const killCursorsNamespace = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+assert.commandWorked(
+ testDB.runCommand({killCursors: killCursorsNamespace, cursors: [res.cursor.id]}));
+
+assertNoOpenCursorsOnSourceCollection();
+
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+
+// Test that killing an aggregation's operation via the killOp command will cause a getMore to
+// fail.
+setup();
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+
+// Use a failpoint to cause a getMore to hang indefinitely.
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'waitAfterPinningCursorBeforeGetMoreBatch', mode: 'alwaysOn'}));
+const curOpFilter = {
+ 'command.getMore': res.cursor.id
+};
+assert.eq(0, testDB.currentOp(curOpFilter).inprog.length);
+
+getMoreCollName = res.cursor.ns.substr(res.cursor.ns.indexOf('.') + 1);
+const parallelShellCode = 'assert.commandFailedWithCode(db.getSiblingDB(\'' + testDB.getName() +
+ '\').runCommand({getMore: ' + res.cursor.id.toString() + ', collection: \'' + getMoreCollName +
+ '\'}), ErrorCodes.Interrupted, \'expected getMore command to be interrupted by killOp\');';
+
+// Start a getMore and wait for it to hang.
+const awaitParallelShell = startParallelShell(parallelShellCode, conn.port);
+assert.soon(function() {
+ return assert.commandWorked(testDB.currentOp(curOpFilter)).inprog.length === 1;
+}, 'expected getMore operation to remain active');
+
+// Wait until we know the failpoint has been reached.
+assert.soon(function() {
+ const filter = {"msg": "waitAfterPinningCursorBeforeGetMoreBatch"};
+ return assert.commandWorked(testDB.currentOp(filter)).inprog.length === 1;
+});
+
+// Kill the operation.
+const opId = assert.commandWorked(testDB.currentOp(curOpFilter)).inprog[0].opid;
+assert.commandWorked(testDB.killOp(opId));
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'waitAfterPinningCursorBeforeGetMoreBatch', mode: 'off'}));
+assert.eq(0, awaitParallelShell());
+
+assertNoOpenCursorsOnSourceCollection();
+
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+
+// Test that a timeout of an aggregation's cursor will cause a subsequent getMore to fail.
+setup();
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+
+let serverStatus = assert.commandWorked(testDB.serverStatus());
+const expectedNumTimedOutCursors = serverStatus.metrics.cursor.timedOut + 1;
+
+// Wait until the idle cursor background job has killed the aggregation cursor.
+assert.commandWorked(testDB.adminCommand({setParameter: 1, cursorTimeoutMillis: 10}));
+const cursorTimeoutFrequencySeconds = 1;
+assert.commandWorked(testDB.adminCommand(
+ {setParameter: 1, clientCursorMonitorFrequencySecs: cursorTimeoutFrequencySeconds}));
+assert.soon(
+ function() {
+ serverStatus = assert.commandWorked(testDB.serverStatus());
+ return serverStatus.metrics.cursor.timedOut == expectedNumTimedOutCursors;
+ },
+ function() {
+ return 'aggregation cursor failed to time out, expected ' + expectedNumTimedOutCursors +
+ ' timed out cursors: ' + tojson(serverStatus.metrics.cursor);
});
- // Kill the operation.
- const opId = assert.commandWorked(testDB.currentOp(curOpFilter)).inprog[0].opid;
- assert.commandWorked(testDB.killOp(opId));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'waitAfterPinningCursorBeforeGetMoreBatch', mode: 'off'}));
- assert.eq(0, awaitParallelShell());
-
- assertNoOpenCursorsOnSourceCollection();
-
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
-
- // Test that a timeout of an aggregation's cursor will cause a subsequent getMore to fail.
- setup();
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
-
- let serverStatus = assert.commandWorked(testDB.serverStatus());
- const expectedNumTimedOutCursors = serverStatus.metrics.cursor.timedOut + 1;
-
- // Wait until the idle cursor background job has killed the aggregation cursor.
- assert.commandWorked(testDB.adminCommand({setParameter: 1, cursorTimeoutMillis: 10}));
- const cursorTimeoutFrequencySeconds = 1;
- assert.commandWorked(testDB.adminCommand(
- {setParameter: 1, clientCursorMonitorFrequencySecs: cursorTimeoutFrequencySeconds}));
- assert.soon(
- function() {
- serverStatus = assert.commandWorked(testDB.serverStatus());
- return serverStatus.metrics.cursor.timedOut == expectedNumTimedOutCursors;
- },
- function() {
- return 'aggregation cursor failed to time out, expected ' + expectedNumTimedOutCursors +
- ' timed out cursors: ' + tojson(serverStatus.metrics.cursor);
- });
-
- assertNoOpenCursorsOnSourceCollection();
- assert.commandFailedWithCode(
- testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
-
- // Test that a cursor will properly be cleaned up on server shutdown.
- setup();
- res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
- assert.eq(0, MongoRunner.stopMongod(conn), 'expected mongod to shutdown cleanly');
+assertNoOpenCursorsOnSourceCollection();
+assert.commandFailedWithCode(
+ testDB.runCommand({getMore: res.cursor.id, collection: getMoreCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+
+// Test that a cursor will properly be cleaned up on server shutdown.
+setup();
+res = assert.commandWorked(testDB.runCommand(defaultAggregateCmdSmallBatch));
+assert.eq(0, MongoRunner.stopMongod(conn), 'expected mongod to shutdown cleanly');
})();
diff --git a/jstests/noPassthrough/aggregation_log_namespace.js b/jstests/noPassthrough/aggregation_log_namespace.js
index ad9f6b6d7b1..a45a3a96597 100644
--- a/jstests/noPassthrough/aggregation_log_namespace.js
+++ b/jstests/noPassthrough/aggregation_log_namespace.js
@@ -2,53 +2,52 @@
// command when a pipeline contains a stage that can write into an output collection.
// @tags: [requires_profiling]
(function() {
- 'use strict';
-
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachKindOfWriteStage.
- load("jstests/libs/check_log.js"); // For checkLogs.containsWithCount.
-
- // Runs the given 'pipeline' and verifies that the namespace is correctly logged in the global
- // log for the aggregate command. The 'comment' parameter is used to match a log entry against
- // the aggregate command.
- function verifyLoggedNamespace({pipeline, comment}) {
- assert.commandWorked(db.runCommand(
- {aggregate: source.getName(), comment: comment, pipeline: pipeline, cursor: {}}));
- checkLog.containsWithCount(
- conn,
- `command ${source.getFullName()} appName: "MongoDB Shell" ` +
- `command: aggregate { aggregate: "${source.getName()}", comment: "${comment}"`,
- 1);
- }
-
- const mongodOptions = {};
- const conn = MongoRunner.runMongod(mongodOptions);
- assert.neq(null, conn, `mongod failed to start with options ${tojson(mongodOptions)}`);
-
- const db = conn.getDB(`${jsTest.name()}_db`);
- const source = db.getCollection(`${jsTest.name()}_source`);
- source.drop();
- const target = db.getCollection(`${jsTest.name()}_target`);
- target.drop();
-
- // Make sure each command gets logged.
- assert.commandWorked(db.setProfilingLevel(1, {slowms: 0}));
-
- // Test stages that can write into an output collection.
- withEachKindOfWriteStage(
- target,
- (stage) => verifyLoggedNamespace({pipeline: [stage], comment: Object.keys(stage)[0]}));
-
- // Test each $merge mode.
- withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => verifyLoggedNamespace({
- pipeline: [{
- $merge: {
- into: target.getName(),
- whenMatched: whenMatchedMode,
- whenNotMatched: whenNotMatchedMode
- }
- }],
- comment: `merge_${whenMatchedMode}_${whenNotMatchedMode}`
- }));
-
- MongoRunner.stopMongod(conn);
+'use strict';
+
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachKindOfWriteStage.
+load("jstests/libs/check_log.js"); // For checkLogs.containsWithCount.
+
+// Runs the given 'pipeline' and verifies that the namespace is correctly logged in the global
+// log for the aggregate command. The 'comment' parameter is used to match a log entry against
+// the aggregate command.
+function verifyLoggedNamespace({pipeline, comment}) {
+ assert.commandWorked(db.runCommand(
+ {aggregate: source.getName(), comment: comment, pipeline: pipeline, cursor: {}}));
+ checkLog.containsWithCount(
+ conn,
+ `command ${source.getFullName()} appName: "MongoDB Shell" ` +
+ `command: aggregate { aggregate: "${source.getName()}", comment: "${comment}"`,
+ 1);
+}
+
+const mongodOptions = {};
+const conn = MongoRunner.runMongod(mongodOptions);
+assert.neq(null, conn, `mongod failed to start with options ${tojson(mongodOptions)}`);
+
+const db = conn.getDB(`${jsTest.name()}_db`);
+const source = db.getCollection(`${jsTest.name()}_source`);
+source.drop();
+const target = db.getCollection(`${jsTest.name()}_target`);
+target.drop();
+
+// Make sure each command gets logged.
+assert.commandWorked(db.setProfilingLevel(1, {slowms: 0}));
+
+// Test stages that can write into an output collection.
+withEachKindOfWriteStage(
+ target, (stage) => verifyLoggedNamespace({pipeline: [stage], comment: Object.keys(stage)[0]}));
+
+// Test each $merge mode.
+withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => verifyLoggedNamespace({
+ pipeline: [{
+ $merge: {
+ into: target.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ }
+ }],
+ comment: `merge_${whenMatchedMode}_${whenNotMatchedMode}`
+ }));
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/aggregation_zero_batchsize.js b/jstests/noPassthrough/aggregation_zero_batchsize.js
index 3360c6d0856..d143c75ede6 100644
--- a/jstests/noPassthrough/aggregation_zero_batchsize.js
+++ b/jstests/noPassthrough/aggregation_zero_batchsize.js
@@ -3,86 +3,86 @@
* retrieved via getMores.
*/
(function() {
- "use strict";
+"use strict";
- const mongodOptions = {};
- const conn = MongoRunner.runMongod(mongodOptions);
- assert.neq(null, conn, "mongod failed to start with options " + tojson(mongodOptions));
+const mongodOptions = {};
+const conn = MongoRunner.runMongod(mongodOptions);
+assert.neq(null, conn, "mongod failed to start with options " + tojson(mongodOptions));
- const testDB = conn.getDB("test");
- const coll = testDB[jsTest.name];
- coll.drop();
+const testDB = conn.getDB("test");
+const coll = testDB[jsTest.name];
+coll.drop();
- // Test that an aggregate is successful on a non-existent collection.
- assert.eq(0,
- coll.aggregate([]).toArray().length,
- "expected no results from an aggregation on an empty collection");
+// Test that an aggregate is successful on a non-existent collection.
+assert.eq(0,
+ coll.aggregate([]).toArray().length,
+ "expected no results from an aggregation on an empty collection");
- // Test that an aggregate is successful on a non-existent collection with a batchSize of 0, and
- // that a getMore will succeed with an empty result set.
- let res = assert.commandWorked(
- testDB.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}));
+// Test that an aggregate is successful on a non-existent collection with a batchSize of 0, and
+// that a getMore will succeed with an empty result set.
+let res = assert.commandWorked(
+ testDB.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}));
- let cursor = new DBCommandCursor(testDB, res);
- assert.eq(
- 0, cursor.itcount(), "expected no results from getMore of aggregation on empty collection");
+let cursor = new DBCommandCursor(testDB, res);
+assert.eq(
+ 0, cursor.itcount(), "expected no results from getMore of aggregation on empty collection");
- // Test that an aggregation can return *all* matching data via getMores if the initial aggregate
- // used a batchSize of 0.
- const nDocs = 1000;
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; i++) {
- bulk.insert({_id: i, stringField: "string"});
- }
- assert.writeOK(bulk.execute());
+// Test that an aggregation can return *all* matching data via getMores if the initial aggregate
+// used a batchSize of 0.
+const nDocs = 1000;
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < nDocs; i++) {
+ bulk.insert({_id: i, stringField: "string"});
+}
+assert.writeOK(bulk.execute());
- res = assert.commandWorked(
- testDB.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}));
- cursor = new DBCommandCursor(testDB, res);
- assert.eq(nDocs, cursor.itcount(), "expected all results to be returned via getMores");
+res = assert.commandWorked(
+ testDB.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}));
+cursor = new DBCommandCursor(testDB, res);
+assert.eq(nDocs, cursor.itcount(), "expected all results to be returned via getMores");
- // Test that an error in a getMore will destroy the cursor.
- function assertNumOpenCursors(nExpectedOpen) {
- let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- assert.eq(nExpectedOpen,
- serverStatus.metrics.cursor.open.total,
- "expected to find " + nExpectedOpen + " open cursor(s): " +
- tojson(serverStatus.metrics.cursor));
- }
+// Test that an error in a getMore will destroy the cursor.
+function assertNumOpenCursors(nExpectedOpen) {
+ let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ assert.eq(nExpectedOpen,
+ serverStatus.metrics.cursor.open.total,
+ "expected to find " + nExpectedOpen +
+ " open cursor(s): " + tojson(serverStatus.metrics.cursor));
+}
- // Issue an aggregate command that will fail *at runtime*, so the error will happen in a
- // getMore.
- assertNumOpenCursors(0);
- res = assert.commandWorked(testDB.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$project: {invalidComputation: {$add: [1, "$stringField"]}}}],
- cursor: {batchSize: 0}
- }));
- cursor = new DBCommandCursor(testDB, res);
- assertNumOpenCursors(1);
+// Issue an aggregate command that will fail *at runtime*, so the error will happen in a
+// getMore.
+assertNumOpenCursors(0);
+res = assert.commandWorked(testDB.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$project: {invalidComputation: {$add: [1, "$stringField"]}}}],
+ cursor: {batchSize: 0}
+}));
+cursor = new DBCommandCursor(testDB, res);
+assertNumOpenCursors(1);
- assert.throws(() => cursor.itcount(), [], "expected getMore to fail");
- assertNumOpenCursors(0);
+assert.throws(() => cursor.itcount(), [], "expected getMore to fail");
+assertNumOpenCursors(0);
- // Test that an error in a getMore using a $out stage will destroy the cursor. This test is
- // intended to reproduce SERVER-26608.
+// Test that an error in a getMore using a $out stage will destroy the cursor. This test is
+// intended to reproduce SERVER-26608.
- // Issue an aggregate command that will fail *at runtime*, so the error will happen in a
- // getMore.
- res = assert.commandWorked(testDB.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$out: "validated_collection"}],
- cursor: {batchSize: 0}
- }));
- cursor = new DBCommandCursor(testDB, res);
- assertNumOpenCursors(1);
+// Issue an aggregate command that will fail *at runtime*, so the error will happen in a
+// getMore.
+res = assert.commandWorked(testDB.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$out: "validated_collection"}],
+ cursor: {batchSize: 0}
+}));
+cursor = new DBCommandCursor(testDB, res);
+assertNumOpenCursors(1);
- // Add a document validation rule to the $out collection so that insertion will fail.
- assert.commandWorked(testDB.runCommand(
- {create: "validated_collection", validator: {stringField: {$type: "int"}}}));
+// Add a document validation rule to the $out collection so that insertion will fail.
+assert.commandWorked(
+ testDB.runCommand({create: "validated_collection", validator: {stringField: {$type: "int"}}}));
- assert.throws(() => cursor.itcount(), [], "expected getMore to fail");
- assertNumOpenCursors(0);
+assert.throws(() => cursor.itcount(), [], "expected getMore to fail");
+assertNumOpenCursors(0);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/apply_ops_DDL_operation_does_not_take_global_X.js b/jstests/noPassthrough/apply_ops_DDL_operation_does_not_take_global_X.js
index d8cd49f7995..3e855455985 100644
--- a/jstests/noPassthrough/apply_ops_DDL_operation_does_not_take_global_X.js
+++ b/jstests/noPassthrough/apply_ops_DDL_operation_does_not_take_global_X.js
@@ -5,75 +5,72 @@
*/
(function() {
- 'use strict';
+'use strict';
+
+const testDBName = 'test';
+const readDBName = 'read';
+const readCollName = 'readColl';
+const testCollName = 'testColl';
+const renameCollName = 'renameColl';
+
+const rst = new ReplSetTest({name: jsTestName(), nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+
+assert.commandWorked(
+ primary.getDB(readDBName)
+ .runCommand({insert: readCollName, documents: [{x: 1}], writeConcern: {w: 2}}));
+
+// The find will hang and holds a global IS lock.
+assert.commandWorked(secondary.getDB("admin").runCommand(
+ {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
+
+const findWait = startParallelShell(function() {
+ db.getMongo().setSlaveOk();
+ assert.eq(
+ db.getSiblingDB('read').getCollection('readColl').find().comment('read hangs').itcount(),
+ 1);
+}, secondary.port);
+
+assert.soon(function() {
+ let findOp = secondary.getDB('admin')
+ .aggregate([{$currentOp: {}}, {$match: {'command.comment': 'read hangs'}}])
+ .toArray();
+ return findOp.length == 1;
+});
+
+{
+ // Run a series of DDL commands, none of which should take the global X lock.
+ const testDB = primary.getDB(testDBName);
+ assert.commandWorked(testDB.runCommand({create: testCollName, writeConcern: {w: 2}}));
- const testDBName = 'test';
- const readDBName = 'read';
- const readCollName = 'readColl';
- const testCollName = 'testColl';
- const renameCollName = 'renameColl';
-
- const rst = new ReplSetTest({name: jsTestName(), nodes: 2});
- rst.startSet();
- rst.initiate();
+ assert.commandWorked(
+ testDB.runCommand({collMod: testCollName, validator: {v: 1}, writeConcern: {w: 2}}));
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
+ assert.commandWorked(testDB.runCommand({
+ createIndexes: testCollName,
+ indexes: [{key: {x: 1}, name: 'x_1'}],
+ writeConcern: {w: 2}
+ }));
assert.commandWorked(
- primary.getDB(readDBName)
- .runCommand({insert: readCollName, documents: [{x: 1}], writeConcern: {w: 2}}));
-
- // The find will hang and holds a global IS lock.
- assert.commandWorked(secondary.getDB("admin").runCommand(
- {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
-
- const findWait = startParallelShell(function() {
- db.getMongo().setSlaveOk();
- assert.eq(db.getSiblingDB('read')
- .getCollection('readColl')
- .find()
- .comment('read hangs')
- .itcount(),
- 1);
- }, secondary.port);
-
- assert.soon(function() {
- let findOp = secondary.getDB('admin')
- .aggregate([{$currentOp: {}}, {$match: {'command.comment': 'read hangs'}}])
- .toArray();
- return findOp.length == 1;
- });
-
- {
- // Run a series of DDL commands, none of which should take the global X lock.
- const testDB = primary.getDB(testDBName);
- assert.commandWorked(testDB.runCommand({create: testCollName, writeConcern: {w: 2}}));
-
- assert.commandWorked(
- testDB.runCommand({collMod: testCollName, validator: {v: 1}, writeConcern: {w: 2}}));
-
- assert.commandWorked(testDB.runCommand({
- createIndexes: testCollName,
- indexes: [{key: {x: 1}, name: 'x_1'}],
- writeConcern: {w: 2}
- }));
-
- assert.commandWorked(
- testDB.runCommand({dropIndexes: testCollName, index: 'x_1', writeConcern: {w: 2}}));
-
- assert.commandWorked(primary.getDB('admin').runCommand({
- renameCollection: testDBName + '.' + testCollName,
- to: testDBName + '.' + renameCollName,
- writeConcern: {w: 2}
- }));
-
- assert.commandWorked(testDB.runCommand({drop: renameCollName, writeConcern: {w: 2}}));
- }
-
- assert.commandWorked(secondary.getDB("admin").runCommand(
- {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
- findWait();
-
- rst.stopSet();
+ testDB.runCommand({dropIndexes: testCollName, index: 'x_1', writeConcern: {w: 2}}));
+
+ assert.commandWorked(primary.getDB('admin').runCommand({
+ renameCollection: testDBName + '.' + testCollName,
+ to: testDBName + '.' + renameCollName,
+ writeConcern: {w: 2}
+ }));
+
+ assert.commandWorked(testDB.runCommand({drop: renameCollName, writeConcern: {w: 2}}));
+}
+
+assert.commandWorked(secondary.getDB("admin").runCommand(
+ {configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
+findWait();
+
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/apply_ops_mode.js b/jstests/noPassthrough/apply_ops_mode.js
index 3515f4d8040..385cf0d532b 100644
--- a/jstests/noPassthrough/apply_ops_mode.js
+++ b/jstests/noPassthrough/apply_ops_mode.js
@@ -5,89 +5,85 @@
*/
(function() {
- 'use strict';
- load('jstests/libs/feature_compatibility_version.js');
-
- var standalone = MongoRunner.runMongod();
- var db = standalone.getDB("test");
-
- var coll = db.getCollection("apply_ops_mode1");
- coll.drop();
- assert.writeOK(coll.insert({_id: 1}));
-
- // ------------ Testing normal updates ---------------
-
- var id = ObjectId();
- var updateOp = {op: 'u', ns: coll.getFullName(), o: {_id: id, x: 1}, o2: {_id: id}};
- assert.commandFailed(db.adminCommand({applyOps: [updateOp], alwaysUpsert: false}));
- assert.eq(coll.count({x: 1}), 0);
-
- // Test that 'InitialSync' does not override 'alwaysUpsert: false'.
- assert.commandFailed(db.adminCommand(
- {applyOps: [updateOp], alwaysUpsert: false, oplogApplicationMode: "InitialSync"}));
- assert.eq(coll.count({x: 1}), 0);
-
- // Test parsing failure.
- assert.commandFailedWithCode(
- db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "BadMode"}),
- ErrorCodes.FailedToParse);
- assert.commandFailedWithCode(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: 5}),
- ErrorCodes.TypeMismatch);
-
- // Test default succeeds.
- assert.commandWorked(db.adminCommand({applyOps: [updateOp]}));
- assert.eq(coll.count({x: 1}), 1);
-
- // Use new collection to make logs cleaner.
- coll = db.getCollection("apply_ops_mode2");
- coll.drop();
- updateOp.ns = coll.getFullName();
- assert.writeOK(coll.insert({_id: 1}));
-
- // Test default succeeds in 'InitialSync' mode.
- assert.commandWorked(
- db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "InitialSync"}));
- assert.eq(coll.count({x: 1}), 1);
-
- // ------------ Testing fCV updates ---------------
-
- var adminDB = db.getSiblingDB("admin");
- const systemVersionColl = adminDB.getCollection("system.version");
-
- updateOp = {
- op: 'u',
- ns: systemVersionColl.getFullName(),
- o: {_id: "featureCompatibilityVersion", version: lastStableFCV},
- o2: {_id: "featureCompatibilityVersion"}
- };
- assert.commandFailed(
- db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "InitialSync"}));
-
- assert.commandWorked(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "ApplyOps"}));
-
- // Test default succeeds.
- updateOp.o.targetVersion = latestFCV;
- assert.commandWorked(db.adminCommand({
- applyOps: [updateOp],
- }));
-
- // ------------ Testing commands on the fCV collection ---------------
-
- var collModOp = {
- op: 'c',
- ns: systemVersionColl.getDB() + ".$cmd",
- o: {collMod: systemVersionColl.getName(), validationLevel: "off"},
- };
- assert.commandFailed(
- db.adminCommand({applyOps: [collModOp], oplogApplicationMode: "InitialSync"}));
-
- assert.commandWorked(
- db.adminCommand({applyOps: [collModOp], oplogApplicationMode: "ApplyOps"}));
-
- // Test default succeeds.
- assert.commandWorked(db.adminCommand({
- applyOps: [collModOp],
- }));
-
- MongoRunner.stopMongod(standalone);
+'use strict';
+load('jstests/libs/feature_compatibility_version.js');
+
+var standalone = MongoRunner.runMongod();
+var db = standalone.getDB("test");
+
+var coll = db.getCollection("apply_ops_mode1");
+coll.drop();
+assert.writeOK(coll.insert({_id: 1}));
+
+// ------------ Testing normal updates ---------------
+
+var id = ObjectId();
+var updateOp = {op: 'u', ns: coll.getFullName(), o: {_id: id, x: 1}, o2: {_id: id}};
+assert.commandFailed(db.adminCommand({applyOps: [updateOp], alwaysUpsert: false}));
+assert.eq(coll.count({x: 1}), 0);
+
+// Test that 'InitialSync' does not override 'alwaysUpsert: false'.
+assert.commandFailed(db.adminCommand(
+ {applyOps: [updateOp], alwaysUpsert: false, oplogApplicationMode: "InitialSync"}));
+assert.eq(coll.count({x: 1}), 0);
+
+// Test parsing failure.
+assert.commandFailedWithCode(
+ db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "BadMode"}),
+ ErrorCodes.FailedToParse);
+assert.commandFailedWithCode(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: 5}),
+ ErrorCodes.TypeMismatch);
+
+// Test default succeeds.
+assert.commandWorked(db.adminCommand({applyOps: [updateOp]}));
+assert.eq(coll.count({x: 1}), 1);
+
+// Use new collection to make logs cleaner.
+coll = db.getCollection("apply_ops_mode2");
+coll.drop();
+updateOp.ns = coll.getFullName();
+assert.writeOK(coll.insert({_id: 1}));
+
+// Test default succeeds in 'InitialSync' mode.
+assert.commandWorked(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "InitialSync"}));
+assert.eq(coll.count({x: 1}), 1);
+
+// ------------ Testing fCV updates ---------------
+
+var adminDB = db.getSiblingDB("admin");
+const systemVersionColl = adminDB.getCollection("system.version");
+
+updateOp = {
+ op: 'u',
+ ns: systemVersionColl.getFullName(),
+ o: {_id: "featureCompatibilityVersion", version: lastStableFCV},
+ o2: {_id: "featureCompatibilityVersion"}
+};
+assert.commandFailed(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "InitialSync"}));
+
+assert.commandWorked(db.adminCommand({applyOps: [updateOp], oplogApplicationMode: "ApplyOps"}));
+
+// Test default succeeds.
+updateOp.o.targetVersion = latestFCV;
+assert.commandWorked(db.adminCommand({
+ applyOps: [updateOp],
+}));
+
+// ------------ Testing commands on the fCV collection ---------------
+
+var collModOp = {
+ op: 'c',
+ ns: systemVersionColl.getDB() + ".$cmd",
+ o: {collMod: systemVersionColl.getName(), validationLevel: "off"},
+};
+assert.commandFailed(db.adminCommand({applyOps: [collModOp], oplogApplicationMode: "InitialSync"}));
+
+assert.commandWorked(db.adminCommand({applyOps: [collModOp], oplogApplicationMode: "ApplyOps"}));
+
+// Test default succeeds.
+assert.commandWorked(db.adminCommand({
+ applyOps: [collModOp],
+}));
+
+MongoRunner.stopMongod(standalone);
})();
diff --git a/jstests/noPassthrough/apply_ops_overwrite_admin_system_version.js b/jstests/noPassthrough/apply_ops_overwrite_admin_system_version.js
index f02fad241d3..ca679919ab9 100644
--- a/jstests/noPassthrough/apply_ops_overwrite_admin_system_version.js
+++ b/jstests/noPassthrough/apply_ops_overwrite_admin_system_version.js
@@ -1,41 +1,41 @@
(function() {
- "use strict";
- load("jstests/libs/feature_compatibility_version.js");
- var standalone = MongoRunner.runMongod();
- var adminDB = standalone.getDB("admin");
+"use strict";
+load("jstests/libs/feature_compatibility_version.js");
+var standalone = MongoRunner.runMongod();
+var adminDB = standalone.getDB("admin");
- // Get the uuid of the original admin.system.version.
- var res = adminDB.runCommand({listCollections: 1, filter: {name: "system.version"}});
- assert.commandWorked(res, "failed to list collections");
- assert.eq(1, res.cursor.firstBatch.length);
- var originalUUID = res.cursor.firstBatch[0].info.uuid;
- var newUUID = UUID();
+// Get the uuid of the original admin.system.version.
+var res = adminDB.runCommand({listCollections: 1, filter: {name: "system.version"}});
+assert.commandWorked(res, "failed to list collections");
+assert.eq(1, res.cursor.firstBatch.length);
+var originalUUID = res.cursor.firstBatch[0].info.uuid;
+var newUUID = UUID();
- // Create new collection, insert new FCV document and then delete the
- // original collection.
- var createNewAdminSystemVersionCollection =
- {op: "c", ns: "admin.$cmd", ui: newUUID, o: {create: "system.version"}};
- var insertFCVDocument = {
- op: "i",
- ns: "admin.system.version",
- o: {_id: "featureCompatibilityVersion", version: latestFCV}
- };
- var dropOriginalAdminSystemVersionCollection =
- {op: "c", ns: "admin.$cmd", ui: originalUUID, o: {drop: "admin.tmp_system_version"}};
- var cmd = {
- applyOps: [
- createNewAdminSystemVersionCollection,
- insertFCVDocument,
- dropOriginalAdminSystemVersionCollection
- ]
- };
- assert.commandWorked(adminDB.runCommand(cmd), "failed command " + tojson(cmd));
+// Create new collection, insert new FCV document and then delete the
+// original collection.
+var createNewAdminSystemVersionCollection =
+ {op: "c", ns: "admin.$cmd", ui: newUUID, o: {create: "system.version"}};
+var insertFCVDocument = {
+ op: "i",
+ ns: "admin.system.version",
+ o: {_id: "featureCompatibilityVersion", version: latestFCV}
+};
+var dropOriginalAdminSystemVersionCollection =
+ {op: "c", ns: "admin.$cmd", ui: originalUUID, o: {drop: "admin.tmp_system_version"}};
+var cmd = {
+ applyOps: [
+ createNewAdminSystemVersionCollection,
+ insertFCVDocument,
+ dropOriginalAdminSystemVersionCollection
+ ]
+};
+assert.commandWorked(adminDB.runCommand(cmd), "failed command " + tojson(cmd));
- // Now admin.system.version is overwritten with the new entry.
- res = adminDB.runCommand({listCollections: 1, filter: {name: "system.version"}});
- assert.commandWorked(res, "failed to list collections");
- assert.eq(1, res.cursor.firstBatch.length);
- assert.eq(newUUID, res.cursor.firstBatch[0].info.uuid);
+// Now admin.system.version is overwritten with the new entry.
+res = adminDB.runCommand({listCollections: 1, filter: {name: "system.version"}});
+assert.commandWorked(res, "failed to list collections");
+assert.eq(1, res.cursor.firstBatch.length);
+assert.eq(newUUID, res.cursor.firstBatch[0].info.uuid);
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
})();
diff --git a/jstests/noPassthrough/atomic_rename_collection.js b/jstests/noPassthrough/atomic_rename_collection.js
index c8e24f1c591..a6f39c1c40f 100644
--- a/jstests/noPassthrough/atomic_rename_collection.js
+++ b/jstests/noPassthrough/atomic_rename_collection.js
@@ -1,47 +1,47 @@
// @tags: [requires_replication]
(function() {
- // SERVER-28285 When renameCollection drops the target collection, it should just generate
- // a single oplog entry, so we cannot end up in a state where the drop has succeeded, but
- // the rename didn't.
- let rs = new ReplSetTest({nodes: 1});
- rs.startSet();
- rs.initiate();
+// SERVER-28285 When renameCollection drops the target collection, it should just generate
+// a single oplog entry, so we cannot end up in a state where the drop has succeeded, but
+// the rename didn't.
+let rs = new ReplSetTest({nodes: 1});
+rs.startSet();
+rs.initiate();
- let prim = rs.getPrimary();
- let first = prim.getDB("first");
- let second = prim.getDB("second");
- let local = prim.getDB("local");
+let prim = rs.getPrimary();
+let first = prim.getDB("first");
+let second = prim.getDB("second");
+let local = prim.getDB("local");
- // Test renames both within a database and across databases.
- const tests = [
- {
- source: first.x,
- target: first.y,
- expectedOplogEntries: 1,
- },
- {
- source: first.x,
- target: second.x,
- expectedOplogEntries: 4,
- }
- ];
- tests.forEach((test) => {
- test.source.drop();
- assert.writeOK(test.source.insert({}));
- assert.writeOK(test.target.insert({}));
+// Test renames both within a database and across databases.
+const tests = [
+ {
+ source: first.x,
+ target: first.y,
+ expectedOplogEntries: 1,
+ },
+ {
+ source: first.x,
+ target: second.x,
+ expectedOplogEntries: 4,
+ }
+];
+tests.forEach((test) => {
+ test.source.drop();
+ assert.writeOK(test.source.insert({}));
+ assert.writeOK(test.target.insert({}));
- let ts = local.oplog.rs.find().sort({$natural: -1}).limit(1).next().ts;
- let cmd = {
- renameCollection: test.source.toString(),
- to: test.target.toString(),
- dropTarget: true
- };
- assert.commandWorked(local.adminCommand(cmd), tojson(cmd));
- ops = local.oplog.rs.find({ts: {$gt: ts}}).sort({$natural: 1}).toArray();
- assert.eq(ops.length,
- test.expectedOplogEntries,
- "renameCollection was supposed to only generate " + test.expectedOplogEntries +
- " oplog entries: " + tojson(ops));
- });
- rs.stopSet();
+ let ts = local.oplog.rs.find().sort({$natural: -1}).limit(1).next().ts;
+ let cmd = {
+ renameCollection: test.source.toString(),
+ to: test.target.toString(),
+ dropTarget: true
+ };
+ assert.commandWorked(local.adminCommand(cmd), tojson(cmd));
+ ops = local.oplog.rs.find({ts: {$gt: ts}}).sort({$natural: 1}).toArray();
+ assert.eq(ops.length,
+ test.expectedOplogEntries,
+ "renameCollection was supposed to only generate " + test.expectedOplogEntries +
+ " oplog entries: " + tojson(ops));
+});
+rs.stopSet();
})();
diff --git a/jstests/noPassthrough/auth_reject_mismatching_logical_times.js b/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
index c67482ae5b9..ca4e3da965c 100644
--- a/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
+++ b/jstests/noPassthrough/auth_reject_mismatching_logical_times.js
@@ -4,73 +4,72 @@
* @tags: [requires_replication, requires_sharding]
*/
(function() {
- "use strict";
+"use strict";
- // Given a valid cluster time object, returns one with the same signature, but a mismatching
- // cluster time.
- function mismatchingLogicalTime(lt) {
- return Object.merge(lt, {clusterTime: Timestamp(lt.clusterTime.getTime() + 100, 0)});
- }
+// Given a valid cluster time object, returns one with the same signature, but a mismatching
+// cluster time.
+function mismatchingLogicalTime(lt) {
+ return Object.merge(lt, {clusterTime: Timestamp(lt.clusterTime.getTime() + 100, 0)});
+}
- function assertRejectsMismatchingLogicalTime(db) {
- let validTime = db.runCommand({isMaster: 1}).$clusterTime;
- let mismatchingTime = mismatchingLogicalTime(validTime);
+function assertRejectsMismatchingLogicalTime(db) {
+ let validTime = db.runCommand({isMaster: 1}).$clusterTime;
+ let mismatchingTime = mismatchingLogicalTime(validTime);
- assert.commandFailedWithCode(
- db.runCommand({isMaster: 1, $clusterTime: mismatchingTime}),
- ErrorCodes.TimeProofMismatch,
- "expected command with mismatching cluster time and signature to be rejected");
- }
+ assert.commandFailedWithCode(
+ db.runCommand({isMaster: 1, $clusterTime: mismatchingTime}),
+ ErrorCodes.TimeProofMismatch,
+ "expected command with mismatching cluster time and signature to be rejected");
+}
- function assertAcceptsValidLogicalTime(db) {
- let validTime = db.runCommand({isMaster: 1}).$clusterTime;
- assert.commandWorked(
- testDB.runCommand({isMaster: 1, $clusterTime: validTime}),
- "expected command with valid cluster time and signature to be accepted");
- }
+function assertAcceptsValidLogicalTime(db) {
+ let validTime = db.runCommand({isMaster: 1}).$clusterTime;
+ assert.commandWorked(testDB.runCommand({isMaster: 1, $clusterTime: validTime}),
+ "expected command with valid cluster time and signature to be accepted");
+}
- // Start the sharding test with auth on.
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest({
- mongos: 1,
- manualAddShard: true,
- other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}
- });
+// Start the sharding test with auth on.
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest({
+ mongos: 1,
+ manualAddShard: true,
+ other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}
+});
- // Create admin user and authenticate as them.
- st.s.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
- st.s.getDB("admin").auth("foo", "bar");
+// Create admin user and authenticate as them.
+st.s.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
+st.s.getDB("admin").auth("foo", "bar");
- // Add shard with auth enabled.
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet({keyFile: "jstests/libs/key1", shardsvr: ""});
+// Add shard with auth enabled.
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet({keyFile: "jstests/libs/key1", shardsvr: ""});
- // TODO: Wait for stable recovery timestamp when SERVER-32672 is fixed.
- rst.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
- assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
+// TODO: Wait for stable recovery timestamp when SERVER-32672 is fixed.
+rst.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
- const testDB = st.s.getDB("test");
+const testDB = st.s.getDB("test");
- // Unsharded collections reject mismatching cluster times and accept valid ones.
- assertRejectsMismatchingLogicalTime(testDB);
- assertAcceptsValidLogicalTime(testDB);
+// Unsharded collections reject mismatching cluster times and accept valid ones.
+assertRejectsMismatchingLogicalTime(testDB);
+assertAcceptsValidLogicalTime(testDB);
- // Initialize sharding.
- assert.commandWorked(testDB.adminCommand({enableSharding: "test"}));
- assert.commandWorked(
- testDB.adminCommand({shardCollection: testDB.foo.getFullName(), key: {_id: 1}}));
+// Initialize sharding.
+assert.commandWorked(testDB.adminCommand({enableSharding: "test"}));
+assert.commandWorked(
+ testDB.adminCommand({shardCollection: testDB.foo.getFullName(), key: {_id: 1}}));
- // Sharded collections reject mismatching cluster times and accept valid ones.
- assertRejectsMismatchingLogicalTime(testDB);
- assertAcceptsValidLogicalTime(testDB);
+// Sharded collections reject mismatching cluster times and accept valid ones.
+assertRejectsMismatchingLogicalTime(testDB);
+assertAcceptsValidLogicalTime(testDB);
- // Shards and config servers also reject mismatching times and accept valid ones.
- assertRejectsMismatchingLogicalTime(rst.getPrimary().getDB("test"));
- assertAcceptsValidLogicalTime(rst.getPrimary().getDB("test"));
- assertRejectsMismatchingLogicalTime(st.configRS.getPrimary().getDB("admin"));
- assertAcceptsValidLogicalTime(st.configRS.getPrimary().getDB("admin"));
+// Shards and config servers also reject mismatching times and accept valid ones.
+assertRejectsMismatchingLogicalTime(rst.getPrimary().getDB("test"));
+assertAcceptsValidLogicalTime(rst.getPrimary().getDB("test"));
+assertRejectsMismatchingLogicalTime(st.configRS.getPrimary().getDB("admin"));
+assertAcceptsValidLogicalTime(st.configRS.getPrimary().getDB("admin"));
- st.stop();
- rst.stopSet();
+st.stop();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/auto_retry_on_network_error.js b/jstests/noPassthrough/auto_retry_on_network_error.js
index 03e486a5a05..1c5f8465ebb 100644
--- a/jstests/noPassthrough/auto_retry_on_network_error.js
+++ b/jstests/noPassthrough/auto_retry_on_network_error.js
@@ -4,110 +4,111 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- TestData.networkErrorAndTxnOverrideConfig = {retryOnNetworkErrors: true};
- load('jstests/libs/override_methods/network_error_and_txn_override.js');
- load("jstests/replsets/rslib.js");
-
- function getThreadName(db) {
- let myUri = db.adminCommand({whatsmyuri: 1}).you;
- return db.getSiblingDB("admin")
- .aggregate([{$currentOp: {localOps: true}}, {$match: {client: myUri}}])
- .toArray()[0]
- .desc;
- }
-
- function failNextCommand(db, command) {
- let threadName = getThreadName(db);
-
- assert.commandWorked(db.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 1},
- data: {
- closeConnection: true,
- failCommands: [command],
- threadName: threadName,
- }
- }));
- }
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
-
- // awaitLastStableRecoveryTimestamp runs an 'appendOplogNote' command which is not retryable.
- rst.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
-
- const dbName = "test";
- const collName = "auto_retry";
-
- // The override requires the connection to be run under a session. Use the replica set URL to
- // allow automatic re-targeting of the primary on NotMaster errors.
- const db = new Mongo(rst.getURL()).startSession({retryWrites: true}).getDatabase(dbName);
-
- // Commands with no disconnections should work as normal.
- assert.commandWorked(db.runCommand({ping: 1}));
- assert.commandWorked(db.runCommandWithMetadata({ping: 1}, {}).commandReply);
-
- // Read commands are automatically retried on network errors.
- failNextCommand(db, "find");
- assert.commandWorked(db.runCommand({find: collName}));
-
- failNextCommand(db, "find");
- assert.commandWorked(db.runCommandWithMetadata({find: collName}, {}).commandReply);
-
- // Retryable write commands that can be retried succeed.
- failNextCommand(db, "insert");
- assert.writeOK(db[collName].insert({x: 1}));
-
- failNextCommand(db, "insert");
- assert.commandWorked(db.runCommandWithMetadata({
- insert: collName,
- documents: [{x: 2}, {x: 3}],
- txnNumber: NumberLong(10),
- lsid: {id: UUID()}
- },
- {})
- .commandReply);
-
- // Retryable write commands that cannot be retried (i.e. no transaction number, no session id,
- // or are unordered) throw.
- failNextCommand(db, "insert");
- assert.throws(function() {
- db.runCommand({insert: collName, documents: [{x: 1}, {x: 2}], ordered: false});
- });
-
- // The previous command shouldn't have been retried, so run a command to successfully re-target
- // the primary, so the connection to it can be closed.
- assert.commandWorked(db.runCommandWithMetadata({ping: 1}, {}).commandReply);
-
- failNextCommand(db, "insert");
- assert.throws(function() {
- db.runCommandWithMetadata({insert: collName, documents: [{x: 1}, {x: 2}], ordered: false},
- {});
- });
-
- // getMore commands can't be retried because we won't know whether the cursor was advanced or
- // not.
- let cursorId = assert.commandWorked(db.runCommand({find: collName, batchSize: 0})).cursor.id;
- failNextCommand(db, "getMore");
- assert.throws(function() {
- db.runCommand({getMore: cursorId, collection: collName});
- });
-
- cursorId = assert.commandWorked(db.runCommand({find: collName, batchSize: 0})).cursor.id;
- failNextCommand(db, "getMore");
- assert.throws(function() {
- db.runCommandWithMetadata({getMore: cursorId, collection: collName}, {});
- });
-
- rst.stopSet();
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+TestData.networkErrorAndTxnOverrideConfig = {
+ retryOnNetworkErrors: true
+};
+load('jstests/libs/override_methods/network_error_and_txn_override.js');
+load("jstests/replsets/rslib.js");
+
+function getThreadName(db) {
+ let myUri = db.adminCommand({whatsmyuri: 1}).you;
+ return db.getSiblingDB("admin")
+ .aggregate([{$currentOp: {localOps: true}}, {$match: {client: myUri}}])
+ .toArray()[0]
+ .desc;
+}
+
+function failNextCommand(db, command) {
+ let threadName = getThreadName(db);
+
+ assert.commandWorked(db.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 1},
+ data: {
+ closeConnection: true,
+ failCommands: [command],
+ threadName: threadName,
+ }
+ }));
+}
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+
+// awaitLastStableRecoveryTimestamp runs an 'appendOplogNote' command which is not retryable.
+rst.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+
+const dbName = "test";
+const collName = "auto_retry";
+
+// The override requires the connection to be run under a session. Use the replica set URL to
+// allow automatic re-targeting of the primary on NotMaster errors.
+const db = new Mongo(rst.getURL()).startSession({retryWrites: true}).getDatabase(dbName);
+
+// Commands with no disconnections should work as normal.
+assert.commandWorked(db.runCommand({ping: 1}));
+assert.commandWorked(db.runCommandWithMetadata({ping: 1}, {}).commandReply);
+
+// Read commands are automatically retried on network errors.
+failNextCommand(db, "find");
+assert.commandWorked(db.runCommand({find: collName}));
+
+failNextCommand(db, "find");
+assert.commandWorked(db.runCommandWithMetadata({find: collName}, {}).commandReply);
+
+// Retryable write commands that can be retried succeed.
+failNextCommand(db, "insert");
+assert.writeOK(db[collName].insert({x: 1}));
+
+failNextCommand(db, "insert");
+assert.commandWorked(db.runCommandWithMetadata({
+ insert: collName,
+ documents: [{x: 2}, {x: 3}],
+ txnNumber: NumberLong(10),
+ lsid: {id: UUID()}
+ },
+ {})
+ .commandReply);
+
+// Retryable write commands that cannot be retried (i.e. no transaction number, no session id,
+// or are unordered) throw.
+failNextCommand(db, "insert");
+assert.throws(function() {
+ db.runCommand({insert: collName, documents: [{x: 1}, {x: 2}], ordered: false});
+});
+
+// The previous command shouldn't have been retried, so run a command to successfully re-target
+// the primary, so the connection to it can be closed.
+assert.commandWorked(db.runCommandWithMetadata({ping: 1}, {}).commandReply);
+
+failNextCommand(db, "insert");
+assert.throws(function() {
+ db.runCommandWithMetadata({insert: collName, documents: [{x: 1}, {x: 2}], ordered: false}, {});
+});
+
+// getMore commands can't be retried because we won't know whether the cursor was advanced or
+// not.
+let cursorId = assert.commandWorked(db.runCommand({find: collName, batchSize: 0})).cursor.id;
+failNextCommand(db, "getMore");
+assert.throws(function() {
+ db.runCommand({getMore: cursorId, collection: collName});
+});
+
+cursorId = assert.commandWorked(db.runCommand({find: collName, batchSize: 0})).cursor.id;
+failNextCommand(db, "getMore");
+assert.throws(function() {
+ db.runCommandWithMetadata({getMore: cursorId, collection: collName}, {});
+});
+
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/backup_restore_fsync_lock.js b/jstests/noPassthrough/backup_restore_fsync_lock.js
index 86f45dace3f..a6728a2af10 100644
--- a/jstests/noPassthrough/backup_restore_fsync_lock.js
+++ b/jstests/noPassthrough/backup_restore_fsync_lock.js
@@ -17,9 +17,9 @@
load("jstests/noPassthrough/libs/backup_restore.js");
(function() {
- "use strict";
+"use strict";
- // Run the fsyncLock test. Will return before testing for any engine that doesn't
- // support fsyncLock
- new BackupRestoreTest({backup: 'fsyncLock'}).run();
+// Run the fsyncLock test. Will return before testing for any engine that doesn't
+// support fsyncLock
+new BackupRestoreTest({backup: 'fsyncLock'}).run();
}());
diff --git a/jstests/noPassthrough/backup_restore_rolling.js b/jstests/noPassthrough/backup_restore_rolling.js
index ddc995e4f5a..8196409c7b5 100644
--- a/jstests/noPassthrough/backup_restore_rolling.js
+++ b/jstests/noPassthrough/backup_restore_rolling.js
@@ -17,28 +17,28 @@
load("jstests/noPassthrough/libs/backup_restore.js");
(function() {
- "use strict";
+"use strict";
- // Grab the storage engine, default is wiredTiger
- var storageEngine = jsTest.options().storageEngine || "wiredTiger";
+// Grab the storage engine, default is wiredTiger
+var storageEngine = jsTest.options().storageEngine || "wiredTiger";
- // Skip this test if not running with the "wiredTiger" storage engine.
- if (storageEngine !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
- }
+// Skip this test if not running with the "wiredTiger" storage engine.
+if (storageEngine !== 'wiredTiger') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+}
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- // if rsync is not available on the host, then this test is skipped
- if (!runProgram('bash', '-c', 'which rsync')) {
- new BackupRestoreTest({backup: 'rolling', clientTime: 30000}).run();
- } else {
- jsTestLog("Skipping test for " + storageEngine + ' rolling');
- }
+// if rsync is not available on the host, then this test is skipped
+if (!runProgram('bash', '-c', 'which rsync')) {
+ new BackupRestoreTest({backup: 'rolling', clientTime: 30000}).run();
+} else {
+ jsTestLog("Skipping test for " + storageEngine + ' rolling');
+}
}());
diff --git a/jstests/noPassthrough/backup_restore_stop_start.js b/jstests/noPassthrough/backup_restore_stop_start.js
index a96c7c535bd..3aea0d4cb4f 100644
--- a/jstests/noPassthrough/backup_restore_stop_start.js
+++ b/jstests/noPassthrough/backup_restore_stop_start.js
@@ -17,7 +17,7 @@
load("jstests/noPassthrough/libs/backup_restore.js");
(function() {
- "use strict";
+"use strict";
- new BackupRestoreTest({backup: 'stopStart', clientTime: 30000}).run();
+new BackupRestoreTest({backup: 'stopStart', clientTime: 30000}).run();
}());
diff --git a/jstests/noPassthrough/bind_all_ipv6.js b/jstests/noPassthrough/bind_all_ipv6.js
index 9e47ccd5796..9663f964118 100644
--- a/jstests/noPassthrough/bind_all_ipv6.js
+++ b/jstests/noPassthrough/bind_all_ipv6.js
@@ -1,10 +1,10 @@
// Startup with --bind_ip_all and --ipv6 should not fail with address already in use.
(function() {
- 'use strict';
+'use strict';
- const mongo = MongoRunner.runMongod({ipv6: "", bind_ip_all: ""});
- assert(mongo !== null, "Database is not running");
- assert.commandWorked(mongo.getDB("test").isMaster(), "isMaster failed");
- MongoRunner.stopMongod(mongo);
+const mongo = MongoRunner.runMongod({ipv6: "", bind_ip_all: ""});
+assert(mongo !== null, "Database is not running");
+assert.commandWorked(mongo.getDB("test").isMaster(), "isMaster failed");
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/bind_ip_all.js b/jstests/noPassthrough/bind_ip_all.js
index e840cb2e404..216b41b2ca8 100644
--- a/jstests/noPassthrough/bind_ip_all.js
+++ b/jstests/noPassthrough/bind_ip_all.js
@@ -1,23 +1,23 @@
// Startup with --bind_ip_all should override net.bindIp and vice versa.
(function() {
- 'use strict';
+'use strict';
- const port = allocatePort();
- const BINDIP = 'jstests/noPassthrough/libs/net.bindIp_localhost.yaml';
- const BINDIPALL = 'jstests/noPassthrough/libs/net.bindIpAll.yaml';
+const port = allocatePort();
+const BINDIP = 'jstests/noPassthrough/libs/net.bindIp_localhost.yaml';
+const BINDIPALL = 'jstests/noPassthrough/libs/net.bindIpAll.yaml';
- function runTest(config, opt, expectStar, expectLocalhost) {
- clearRawMongoProgramOutput();
- const mongod =
- runMongoProgram('./mongod', '--port', port, '--config', config, opt, '--outputConfig');
- assert.eq(mongod, 0);
- const output = rawMongoProgramOutput();
- assert.eq(output.search(/bindIp: "\*"/) >= 0, expectStar, output);
- assert.eq(output.search(/bindIp: localhost/) >= 0, expectLocalhost, output);
- assert.eq(output.search(/bindIpAll:/) >= 0, false, output);
- }
+function runTest(config, opt, expectStar, expectLocalhost) {
+ clearRawMongoProgramOutput();
+ const mongod =
+ runMongoProgram('./mongod', '--port', port, '--config', config, opt, '--outputConfig');
+ assert.eq(mongod, 0);
+ const output = rawMongoProgramOutput();
+ assert.eq(output.search(/bindIp: "\*"/) >= 0, expectStar, output);
+ assert.eq(output.search(/bindIp: localhost/) >= 0, expectLocalhost, output);
+ assert.eq(output.search(/bindIpAll:/) >= 0, false, output);
+}
- runTest(BINDIP, '--bind_ip_all', true, false);
- runTest(BINDIPALL, '--bind_ip=localhost', false, true);
+runTest(BINDIP, '--bind_ip_all', true, false);
+runTest(BINDIPALL, '--bind_ip=localhost', false, true);
}());
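Both configurations above are checked via --outputConfig at startup. On an already running mongod, the effective command line and config file settings can also be inspected with getCmdLineOpts; a small sketch (illustrative only; the exact shape of the parsed net section depends on which of bindIp/bindIpAll was supplied):

    const conn = MongoRunner.runMongod({bind_ip_all: ""});
    const res = assert.commandWorked(conn.getDB("admin").runCommand({getCmdLineOpts: 1}));
    // res.parsed.net reflects the bind settings the server actually started with.
    jsTestLog(tojson(res.parsed.net));
    MongoRunner.stopMongod(conn);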
diff --git a/jstests/noPassthrough/bind_localhost.js b/jstests/noPassthrough/bind_localhost.js
index 959c4b70541..242b559831d 100644
--- a/jstests/noPassthrough/bind_localhost.js
+++ b/jstests/noPassthrough/bind_localhost.js
@@ -1,15 +1,15 @@
// Log bound addresses at startup.
(function() {
- 'use strict';
+'use strict';
- const mongo = MongoRunner.runMongod({ipv6: '', bind_ip: 'localhost', useLogFiles: true});
- assert.neq(mongo, null, "Database is not running");
- const log = cat(mongo.fullOptions.logFile);
- print(log);
- assert(log.includes('Listening on 127.0.0.1'), "Not listening on AF_INET");
- if (!_isWindows()) {
- assert(log.match(/Listening on .*\.sock/), "Not listening on AF_UNIX");
- }
- MongoRunner.stopMongod(mongo);
+const mongo = MongoRunner.runMongod({ipv6: '', bind_ip: 'localhost', useLogFiles: true});
+assert.neq(mongo, null, "Database is not running");
+const log = cat(mongo.fullOptions.logFile);
+print(log);
+assert(log.includes('Listening on 127.0.0.1'), "Not listening on AF_INET");
+if (!_isWindows()) {
+ assert(log.match(/Listening on .*\.sock/), "Not listening on AF_UNIX");
+}
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/block_compressor_options.js b/jstests/noPassthrough/block_compressor_options.js
index ebc21f41ceb..129a2a567df 100644
--- a/jstests/noPassthrough/block_compressor_options.js
+++ b/jstests/noPassthrough/block_compressor_options.js
@@ -15,38 +15,38 @@
* @tags: [requires_persistence,requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- // On the first iteration, start a mongod. Subsequent iterations will close and restart on the
- // same dbpath.
- let firstIteration = true;
- let compressors = ['none', 'snappy', 'zlib', 'zstd'];
- let mongo;
- for (let compressor of compressors) {
- jsTestLog({"Starting with compressor": compressor});
- if (firstIteration) {
- mongo = MongoRunner.runMongod({
- wiredTigerCollectionBlockCompressor: compressor,
- wiredTigerJournalCompressor: compressor
- });
- firstIteration = false;
- } else {
- MongoRunner.stopMongod(mongo);
- mongo = MongoRunner.runMongod({
- restart: true,
- dbpath: mongo.dbpath,
- cleanData: false,
- wiredTigerCollectionBlockCompressor: compressor
- });
- }
- mongo.getDB('db')[compressor].insert({});
+// On the first iteration, start a mongod. Subsequent iterations will close and restart on the
+// same dbpath.
+let firstIteration = true;
+let compressors = ['none', 'snappy', 'zlib', 'zstd'];
+let mongo;
+for (let compressor of compressors) {
+ jsTestLog({"Starting with compressor": compressor});
+ if (firstIteration) {
+ mongo = MongoRunner.runMongod({
+ wiredTigerCollectionBlockCompressor: compressor,
+ wiredTigerJournalCompressor: compressor
+ });
+ firstIteration = false;
+ } else {
+ MongoRunner.stopMongod(mongo);
+ mongo = MongoRunner.runMongod({
+ restart: true,
+ dbpath: mongo.dbpath,
+ cleanData: false,
+ wiredTigerCollectionBlockCompressor: compressor
+ });
}
+ mongo.getDB('db')[compressor].insert({});
+}
- for (let compressor of compressors) {
- jsTestLog({"Asserting collection compressor": compressor});
- let stats = mongo.getDB('db')[compressor].stats();
- assert(stats['wiredTiger']['creationString'].search('block_compressor=' + compressor) > -1);
- }
+for (let compressor of compressors) {
+ jsTestLog({"Asserting collection compressor": compressor});
+ let stats = mongo.getDB('db')[compressor].stats();
+ assert(stats['wiredTiger']['creationString'].search('block_compressor=' + compressor) > -1);
+}
- MongoRunner.stopMongod(mongo);
+MongoRunner.stopMongod(mongo);
}());
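The test above sets the server-wide default block compressor via startup options; WiredTiger also accepts a per-collection override through createCollection's storageEngine options, and the result is visible in the same creationString that the assertions above inspect. A sketch of that variant (illustrative only, not part of this patch):

    const conn = MongoRunner.runMongod({});
    const testDB = conn.getDB("db");
    // Request zlib for just this collection, independent of the server-wide default.
    assert.commandWorked(testDB.createCollection(
        "zlib_coll", {storageEngine: {wiredTiger: {configString: "block_compressor=zlib"}}}));
    const creationString = testDB.zlib_coll.stats().wiredTiger.creationString;
    assert(creationString.search("block_compressor=zlib") > -1, creationString);
    MongoRunner.stopMongod(conn);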
diff --git a/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js b/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js
index a9fd668f304..ff211b2424f 100644
--- a/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js
+++ b/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js
@@ -4,50 +4,48 @@
// This test uses the WiredTiger storage engine, which does not support running without journaling.
// @tags: [requires_replication,requires_journaling]
(function() {
- "use strict";
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+"use strict";
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- const rst = new ReplSetTest({nodes: 1});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
- const db = rst.getPrimary().getDB("test");
+const rst = new ReplSetTest({nodes: 1});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
+const db = rst.getPrimary().getDB("test");
- let unique_dbName = jsTestName();
- const sleepShell = startParallelShell(() => {
- assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", seconds: 600}),
- ErrorCodes.Interrupted);
- }, rst.getPrimary().port);
- assert.soon(
- () =>
- db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog.length ===
- 1);
- const sleepOps = db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog;
- assert.eq(sleepOps.length, 1);
- const sleepOpId = sleepOps[0].opid;
+let unique_dbName = jsTestName();
+const sleepShell = startParallelShell(() => {
+ assert.commandFailedWithCode(db.adminCommand({sleep: 1, lock: "w", seconds: 600}),
+ ErrorCodes.Interrupted);
+}, rst.getPrimary().port);
+assert.soon(
+ () =>
+ db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog.length === 1);
+const sleepOps = db.getSiblingDB("admin").currentOp({"command.sleep": 1, active: true}).inprog;
+assert.eq(sleepOps.length, 1);
+const sleepOpId = sleepOps[0].opid;
- // Start two concurrent shells which will both attempt to create the database which does not yet
- // exist.
- const openChangeStreamCode = `const cursor = db.getSiblingDB("${unique_dbName}").test.watch();`;
- const changeStreamShell1 = startParallelShell(openChangeStreamCode, rst.getPrimary().port);
- const changeStreamShell2 = startParallelShell(openChangeStreamCode, rst.getPrimary().port);
+// Start two concurrent shells which will both attempt to create the database which does not yet
+// exist.
+const openChangeStreamCode = `const cursor = db.getSiblingDB("${unique_dbName}").test.watch();`;
+const changeStreamShell1 = startParallelShell(openChangeStreamCode, rst.getPrimary().port);
+const changeStreamShell2 = startParallelShell(openChangeStreamCode, rst.getPrimary().port);
- // Wait until we can see both change streams have started and are waiting to acquire the lock
- // held by the sleep command.
- assert.soon(
- () =>
- db.currentOp({"command.aggregate": "test", waitingForLock: true}).inprog.length === 2);
- assert.commandWorked(db.adminCommand({killOp: 1, op: sleepOpId}));
+// Wait until we can see both change streams have started and are waiting to acquire the lock
+// held by the sleep command.
+assert.soon(
+ () => db.currentOp({"command.aggregate": "test", waitingForLock: true}).inprog.length === 2);
+assert.commandWorked(db.adminCommand({killOp: 1, op: sleepOpId}));
- sleepShell();
+sleepShell();
- // Before the fix for SERVER-34333, the operations in these shells would be deadlocked with each
- // other and never complete.
- changeStreamShell1();
- changeStreamShell2();
+// Before the fix for SERVER-34333, the operations in these shells would be deadlocked with each
+// other and never complete.
+changeStreamShell1();
+changeStreamShell2();
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/change_stream_failover.js b/jstests/noPassthrough/change_stream_failover.js
index 8168c7722de..b8ec132fdd8 100644
--- a/jstests/noPassthrough/change_stream_failover.js
+++ b/jstests/noPassthrough/change_stream_failover.js
@@ -3,90 +3,89 @@
// This test uses the WiredTiger storage engine, which does not support running without journaling.
// @tags: [requires_replication,requires_journaling]
(function() {
- "use strict";
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- const rst = new ReplSetTest({nodes: 3});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
-
- rst.initiate();
-
- for (let key of Object.keys(ChangeStreamWatchMode)) {
- const watchMode = ChangeStreamWatchMode[key];
- jsTestLog("Running test for mode " + watchMode);
-
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB("test");
- const coll = assertDropAndRecreateCollection(primaryDB, "change_stream_failover");
-
- // Be sure we'll only read from the primary.
- primary.setReadPref("primary");
-
- // Open a changeStream on the primary.
- const cst =
- new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, primaryDB));
-
- let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
-
- // Be sure we can read from the change stream. Use {w: "majority"} so that we're still
- // guaranteed to be able to read after the failover.
- assert.writeOK(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
-
- const firstChange = cst.getOneChange(changeStream);
- assert.docEq(firstChange.fullDocument, {_id: 0});
-
- // Make the primary step down
- assert.commandWorked(primaryDB.adminCommand({replSetStepDown: 30}));
-
- // Now wait for another primary to be elected.
- const newPrimary = rst.getPrimary();
- // Be sure we got a different node that the previous primary.
- assert.neq(newPrimary.port, primary.port);
-
- cst.assertNextChangesEqual({
- cursor: changeStream,
- expectedChanges: [{
+"use strict";
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+const rst = new ReplSetTest({nodes: 3});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+
+rst.initiate();
+
+for (let key of Object.keys(ChangeStreamWatchMode)) {
+ const watchMode = ChangeStreamWatchMode[key];
+ jsTestLog("Running test for mode " + watchMode);
+
+ const primary = rst.getPrimary();
+ const primaryDB = primary.getDB("test");
+ const coll = assertDropAndRecreateCollection(primaryDB, "change_stream_failover");
+
+ // Be sure we'll only read from the primary.
+ primary.setReadPref("primary");
+
+ // Open a changeStream on the primary.
+ const cst = new ChangeStreamTest(ChangeStreamTest.getDBForChangeStream(watchMode, primaryDB));
+
+ let changeStream = cst.getChangeStream({watchMode: watchMode, coll: coll});
+
+ // Be sure we can read from the change stream. Use {w: "majority"} so that we're still
+ // guaranteed to be able to read after the failover.
+ assert.writeOK(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+ assert.writeOK(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+
+ const firstChange = cst.getOneChange(changeStream);
+ assert.docEq(firstChange.fullDocument, {_id: 0});
+
+ // Make the primary step down
+ assert.commandWorked(primaryDB.adminCommand({replSetStepDown: 30}));
+
+ // Now wait for another primary to be elected.
+ const newPrimary = rst.getPrimary();
+    // Be sure we got a different node than the previous primary.
+ assert.neq(newPrimary.port, primary.port);
+
+ cst.assertNextChangesEqual({
+ cursor: changeStream,
+ expectedChanges: [{
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1},
+ ns: {db: primaryDB.getName(), coll: coll.getName()},
+ operationType: "insert",
+ }]
+ });
+
+ // Now resume using the resume token from the first change (before the failover).
+ const resumeCursor =
+ cst.getChangeStream({watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
+
+ // Be sure we can read the 2nd and 3rd changes.
+ cst.assertNextChangesEqual({
+ cursor: resumeCursor,
+ expectedChanges: [
+ {
documentKey: {_id: 1},
fullDocument: {_id: 1},
ns: {db: primaryDB.getName(), coll: coll.getName()},
operationType: "insert",
- }]
- });
-
- // Now resume using the resume token from the first change (before the failover).
- const resumeCursor =
- cst.getChangeStream({watchMode: watchMode, coll: coll, resumeAfter: firstChange._id});
-
- // Be sure we can read the 2nd and 3rd changes.
- cst.assertNextChangesEqual({
- cursor: resumeCursor,
- expectedChanges: [
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1},
- ns: {db: primaryDB.getName(), coll: coll.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 2},
- fullDocument: {_id: 2},
- ns: {db: primaryDB.getName(), coll: coll.getName()},
- operationType: "insert",
- }
- ]
- });
-
- // Unfreeze the original primary so that it can stand for election again.
- assert.commandWorked(primaryDB.adminCommand({replSetFreeze: 0}));
- }
+ },
+ {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2},
+ ns: {db: primaryDB.getName(), coll: coll.getName()},
+ operationType: "insert",
+ }
+ ]
+ });
- rst.stopSet();
+ // Unfreeze the original primary so that it can stand for election again.
+ assert.commandWorked(primaryDB.adminCommand({replSetFreeze: 0}));
+}
+
+rst.stopSet();
}());
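The resume step in the loop above goes through ChangeStreamTest, but the underlying pattern is simply the resumeAfter option of watch(): the _id of any previously observed event is a resume token. A stripped-down sketch against a plain collection handle coll on a replica-set primary (illustrative only, not part of this patch):

    const stream = coll.watch();
    assert.commandWorked(coll.insert({_id: "a"}, {writeConcern: {w: "majority"}}));
    assert.soon(() => stream.hasNext());
    const firstEvent = stream.next();  // firstEvent._id is the resume token
    assert.commandWorked(coll.insert({_id: "b"}, {writeConcern: {w: "majority"}}));
    // Resume after the first event; only the later insert should be replayed.
    const resumed = coll.watch([], {resumeAfter: firstEvent._id});
    assert.soon(() => resumed.hasNext());
    assert.eq(resumed.next().fullDocument, {_id: "b"});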
diff --git a/jstests/noPassthrough/change_stream_resume_before_add_shard.js b/jstests/noPassthrough/change_stream_resume_before_add_shard.js
index d987b55bea5..c3b46e9b79c 100644
--- a/jstests/noPassthrough/change_stream_resume_before_add_shard.js
+++ b/jstests/noPassthrough/change_stream_resume_before_add_shard.js
@@ -4,113 +4,115 @@
* @tags: [uses_change_streams, requires_sharding]
*/
(function() {
- "use strict";
-
- const rsNodeOptions = {setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}};
- const st =
- new ShardingTest({shards: 1, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
-
- const mongosDB = st.s.getDB(jsTestName());
- const coll = mongosDB.test;
-
- // Helper function to confirm that a stream sees an expected sequence of documents. This
- // function also pushes all observed changes into the supplied 'eventList' array.
- function assertAllEventsObserved(changeStream, expectedDocs, eventList) {
- for (let expectedDoc of expectedDocs) {
- assert.soon(() => changeStream.hasNext());
- const nextEvent = changeStream.next();
- assert.eq(nextEvent.fullDocument, expectedDoc);
- if (eventList) {
- eventList.push(nextEvent);
- }
+"use strict";
+
+const rsNodeOptions = {
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
+};
+const st =
+ new ShardingTest({shards: 1, mongos: 1, rs: {nodes: 1}, other: {rsOptions: rsNodeOptions}});
+
+const mongosDB = st.s.getDB(jsTestName());
+const coll = mongosDB.test;
+
+// Helper function to confirm that a stream sees an expected sequence of documents. This
+// function also pushes all observed changes into the supplied 'eventList' array.
+function assertAllEventsObserved(changeStream, expectedDocs, eventList) {
+ for (let expectedDoc of expectedDocs) {
+ assert.soon(() => changeStream.hasNext());
+ const nextEvent = changeStream.next();
+ assert.eq(nextEvent.fullDocument, expectedDoc);
+ if (eventList) {
+ eventList.push(nextEvent);
}
}
-
- // Helper function to add a new ReplSetTest shard into the cluster. Using single-node shards
- // ensures that the "initiating set" entry cannot be rolled back.
- function addShardToCluster(shardName) {
- const replTest = new ReplSetTest({name: shardName, nodes: 1, nodeOptions: rsNodeOptions});
- replTest.startSet({shardsvr: ""});
- replTest.initiate();
- assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: shardName}));
-
- // Verify that the new shard's first oplog entry contains the string "initiating set". This
- // is used by change streams as a sentinel to indicate that no writes have occurred on the
- // replica set before this point.
- const firstOplogEntry = replTest.getPrimary().getCollection("local.oplog.rs").findOne();
- assert.docEq(firstOplogEntry.o, {msg: "initiating set"});
- assert.eq(firstOplogEntry.op, "n");
-
- return replTest;
- }
-
- // Helper function to resume from each event in a given list and confirm that the resumed stream
- // sees the subsequent events in the correct expected order.
- function assertCanResumeFromEachEvent(eventList) {
- for (let i = 0; i < eventList.length; ++i) {
- const resumedStream = coll.watch([], {resumeAfter: eventList[i]._id});
- for (let j = i + 1; j < eventList.length; ++j) {
- assert.soon(() => resumedStream.hasNext());
- assert.docEq(resumedStream.next(), eventList[j]);
- }
- resumedStream.close();
+}
+
+// Helper function to add a new ReplSetTest shard into the cluster. Using single-node shards
+// ensures that the "initiating set" entry cannot be rolled back.
+function addShardToCluster(shardName) {
+ const replTest = new ReplSetTest({name: shardName, nodes: 1, nodeOptions: rsNodeOptions});
+ replTest.startSet({shardsvr: ""});
+ replTest.initiate();
+ assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: shardName}));
+
+ // Verify that the new shard's first oplog entry contains the string "initiating set". This
+ // is used by change streams as a sentinel to indicate that no writes have occurred on the
+ // replica set before this point.
+ const firstOplogEntry = replTest.getPrimary().getCollection("local.oplog.rs").findOne();
+ assert.docEq(firstOplogEntry.o, {msg: "initiating set"});
+ assert.eq(firstOplogEntry.op, "n");
+
+ return replTest;
+}
+
+// Helper function to resume from each event in a given list and confirm that the resumed stream
+// sees the subsequent events in the correct expected order.
+function assertCanResumeFromEachEvent(eventList) {
+ for (let i = 0; i < eventList.length; ++i) {
+ const resumedStream = coll.watch([], {resumeAfter: eventList[i]._id});
+ for (let j = i + 1; j < eventList.length; ++j) {
+ assert.soon(() => resumedStream.hasNext());
+ assert.docEq(resumedStream.next(), eventList[j]);
}
+ resumedStream.close();
}
-
- // Open a change stream on the unsharded test collection.
- const csCursor = coll.watch();
- assert(!csCursor.hasNext());
- const changeList = [];
-
- // Insert some docs into the unsharded collection, and obtain a change stream event for each.
- const insertedDocs = [{_id: 1}, {_id: 2}, {_id: 3}];
- assert.commandWorked(coll.insert(insertedDocs));
- assertAllEventsObserved(csCursor, insertedDocs, changeList);
-
- // Verify that, for a brand new shard, we can start at an operation time before the set existed.
- let startAtDawnOfTimeCursor = coll.watch([], {startAtOperationTime: Timestamp(1, 1)});
- assertAllEventsObserved(startAtDawnOfTimeCursor, insertedDocs);
- startAtDawnOfTimeCursor.close();
-
- // Add a new shard into the cluster. Wait three seconds so that its initiation time is
- // guaranteed to be later than any of the events in the existing shard's oplog.
- const newShard1 = sleep(3000) || addShardToCluster("newShard1");
-
- // .. and confirm that we can resume from any point before the shard was added.
- assertCanResumeFromEachEvent(changeList);
-
- // Now shard the collection on _id and move one chunk to the new shard.
- st.shardColl(coll, {_id: 1}, {_id: 3}, false);
- assert.commandWorked(st.s.adminCommand(
- {moveChunk: coll.getFullName(), find: {_id: 3}, to: "newShard1", _waitForDelete: true}));
-
- // Insert some new documents into the new shard and verify that the original stream sees them.
- const newInsertedDocs = [{_id: 4}, {_id: 5}];
- assert.commandWorked(coll.insert(newInsertedDocs));
- assertAllEventsObserved(csCursor, newInsertedDocs, changeList);
-
- // Add a third shard into the cluster...
- const newShard2 = sleep(3000) || addShardToCluster("newShard2");
-
- // ... and verify that we can resume the stream from any of the preceding events.
- assertCanResumeFromEachEvent(changeList);
-
- // Now drop the collection, and verify that we can still resume from any point.
- assert(coll.drop());
- for (let expectedEvent of["drop", "invalidate"]) {
- assert.soon(() => csCursor.hasNext());
- assert.eq(csCursor.next().operationType, expectedEvent);
- }
- assertCanResumeFromEachEvent(changeList);
-
- // Verify that we can start at an operation time before the cluster existed and see all events.
- startAtDawnOfTimeCursor = coll.watch([], {startAtOperationTime: Timestamp(1, 1)});
- assertAllEventsObserved(startAtDawnOfTimeCursor, insertedDocs.concat(newInsertedDocs));
- startAtDawnOfTimeCursor.close();
-
- st.stop();
-
- // Stop the new shards manually since the ShardingTest doesn't know anything about them.
- newShard1.stopSet();
- newShard2.stopSet();
+}
+
+// Open a change stream on the unsharded test collection.
+const csCursor = coll.watch();
+assert(!csCursor.hasNext());
+const changeList = [];
+
+// Insert some docs into the unsharded collection, and obtain a change stream event for each.
+const insertedDocs = [{_id: 1}, {_id: 2}, {_id: 3}];
+assert.commandWorked(coll.insert(insertedDocs));
+assertAllEventsObserved(csCursor, insertedDocs, changeList);
+
+// Verify that, for a brand new shard, we can start at an operation time before the set existed.
+let startAtDawnOfTimeCursor = coll.watch([], {startAtOperationTime: Timestamp(1, 1)});
+assertAllEventsObserved(startAtDawnOfTimeCursor, insertedDocs);
+startAtDawnOfTimeCursor.close();
+
+// Add a new shard into the cluster. Wait three seconds so that its initiation time is
+// guaranteed to be later than any of the events in the existing shard's oplog.
+const newShard1 = sleep(3000) || addShardToCluster("newShard1");
+
+// ... and confirm that we can resume from any point before the shard was added.
+assertCanResumeFromEachEvent(changeList);
+
+// Now shard the collection on _id and move one chunk to the new shard.
+st.shardColl(coll, {_id: 1}, {_id: 3}, false);
+assert.commandWorked(st.s.adminCommand(
+ {moveChunk: coll.getFullName(), find: {_id: 3}, to: "newShard1", _waitForDelete: true}));
+
+// Insert some new documents into the new shard and verify that the original stream sees them.
+const newInsertedDocs = [{_id: 4}, {_id: 5}];
+assert.commandWorked(coll.insert(newInsertedDocs));
+assertAllEventsObserved(csCursor, newInsertedDocs, changeList);
+
+// Add a third shard into the cluster...
+const newShard2 = sleep(3000) || addShardToCluster("newShard2");
+
+// ... and verify that we can resume the stream from any of the preceding events.
+assertCanResumeFromEachEvent(changeList);
+
+// Now drop the collection, and verify that we can still resume from any point.
+assert(coll.drop());
+for (let expectedEvent of ["drop", "invalidate"]) {
+ assert.soon(() => csCursor.hasNext());
+ assert.eq(csCursor.next().operationType, expectedEvent);
+}
+assertCanResumeFromEachEvent(changeList);
+
+// Verify that we can start at an operation time before the cluster existed and see all events.
+startAtDawnOfTimeCursor = coll.watch([], {startAtOperationTime: Timestamp(1, 1)});
+assertAllEventsObserved(startAtDawnOfTimeCursor, insertedDocs.concat(newInsertedDocs));
+startAtDawnOfTimeCursor.close();
+
+st.stop();
+
+// Stop the new shards manually since the ShardingTest doesn't know anything about them.
+newShard1.stopSet();
+newShard2.stopSet();
})();
diff --git a/jstests/noPassthrough/change_stream_sharded_startafter_invalidate.js b/jstests/noPassthrough/change_stream_sharded_startafter_invalidate.js
index 6dca178f6a3..4cffce18e1a 100644
--- a/jstests/noPassthrough/change_stream_sharded_startafter_invalidate.js
+++ b/jstests/noPassthrough/change_stream_sharded_startafter_invalidate.js
@@ -4,41 +4,41 @@
// bug described in SERVER-41196.
// @tags: [requires_sharding, uses_change_streams]
(function() {
- "use strict";
+"use strict";
- // The edge case we are testing occurs on an unsharded collection in a sharded cluster. We
- // create a cluster with just one shard to ensure the test never blocks for another shard.
- const st = new ShardingTest(
- {shards: 1, mongos: 1, rs: {nodes: 1, setParameter: {writePeriodicNoops: false}}});
+// The edge case we are testing occurs on an unsharded collection in a sharded cluster. We
+// create a cluster with just one shard to ensure the test never blocks for another shard.
+const st = new ShardingTest(
+ {shards: 1, mongos: 1, rs: {nodes: 1, setParameter: {writePeriodicNoops: false}}});
- const mongosDB = st.s0.getDB(jsTestName());
- const mongosColl = mongosDB[jsTestName()];
+const mongosDB = st.s0.getDB(jsTestName());
+const mongosColl = mongosDB[jsTestName()];
- // Start a change stream that matches on the invalidate event.
- const changeStream = mongosColl.watch([{'$match': {'operationType': 'invalidate'}}]);
+// Start a change stream that matches on the invalidate event.
+const changeStream = mongosColl.watch([{'$match': {'operationType': 'invalidate'}}]);
- // Create the collection by inserting into it and then drop the collection, thereby generating
- // an invalidate event.
- assert.commandWorked(mongosColl.insert({_id: 1}));
- assert(mongosColl.drop());
- assert.soon(() => changeStream.hasNext());
- const invalidateEvent = changeStream.next();
+// Create the collection by inserting into it and then drop the collection, thereby generating
+// an invalidate event.
+assert.commandWorked(mongosColl.insert({_id: 1}));
+assert(mongosColl.drop());
+assert.soon(() => changeStream.hasNext());
+const invalidateEvent = changeStream.next();
- // Resuming the change stream using the invalidate event allows us to see events after the drop.
- const resumeStream = mongosColl.watch([], {startAfter: invalidateEvent["_id"]});
+// Resuming the change stream using the invalidate event allows us to see events after the drop.
+const resumeStream = mongosColl.watch([], {startAfter: invalidateEvent["_id"]});
- // The PBRT returned with the first (empty) batch should match the resume token we supplied.
- assert.eq(bsonWoCompare(resumeStream.getResumeToken(), invalidateEvent["_id"]), 0);
+// The PBRT returned with the first (empty) batch should match the resume token we supplied.
+assert.eq(bsonWoCompare(resumeStream.getResumeToken(), invalidateEvent["_id"]), 0);
- // Initially, there should be no events visible after the drop.
- assert(!resumeStream.hasNext());
+// Initially, there should be no events visible after the drop.
+assert(!resumeStream.hasNext());
- // Add one last event and make sure the change stream sees it.
- assert.commandWorked(mongosColl.insert({_id: 2}));
- assert.soon(() => resumeStream.hasNext());
- const afterDrop = resumeStream.next();
- assert.eq(afterDrop.operationType, "insert");
- assert.eq(afterDrop.fullDocument, {_id: 2});
+// Add one last event and make sure the change stream sees it.
+assert.commandWorked(mongosColl.insert({_id: 2}));
+assert.soon(() => resumeStream.hasNext());
+const afterDrop = resumeStream.next();
+assert.eq(afterDrop.operationType, "insert");
+assert.eq(afterDrop.fullDocument, {_id: 2});
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/change_stream_transaction.js b/jstests/noPassthrough/change_stream_transaction.js
index fb244c18366..8de51656cfa 100644
--- a/jstests/noPassthrough/change_stream_transaction.js
+++ b/jstests/noPassthrough/change_stream_transaction.js
@@ -8,277 +8,268 @@
* ]
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js"); // For PrepareHelpers.
-
- const dbName = "test";
- const collName = "change_stream_transaction";
-
- /**
- * This test sets an internal parameter in order to force transactions with more than 4
- * operations to span multiple oplog entries, making it easier to test that scenario.
- */
- const maxOpsInOplogEntry = 4;
-
- /**
- * Asserts that the expected operation type and documentKey are found on the change stream
- * cursor. Returns the change stream document.
- */
- function assertWriteVisible(cursor, operationType, documentKey) {
- assert.soon(() => cursor.hasNext());
- const changeDoc = cursor.next();
- assert.eq(operationType, changeDoc.operationType, changeDoc);
- assert.eq(documentKey, changeDoc.documentKey, changeDoc);
- return changeDoc;
- }
-
- /**
- * Asserts that the expected operation type and documentKey are found on the change stream
- * cursor. Pushes the corresponding resume token and change stream document to an array.
- */
- function assertWriteVisibleWithCapture(cursor, operationType, documentKey, changeList) {
- const changeDoc = assertWriteVisible(cursor, operationType, documentKey);
- changeList.push(changeDoc);
- }
-
- /**
- * Asserts that there are no changes waiting on the change stream cursor.
- */
- function assertNoChanges(cursor) {
- assert(!cursor.hasNext(), () => {
- return "Unexpected change set: " + tojson(cursor.toArray());
- });
- }
-
- function runTest(conn) {
- const db = conn.getDB(dbName);
- const coll = db.getCollection(collName);
- const unwatchedColl = db.getCollection(collName + "_unwatched");
- let changeList = [];
-
- // Collections must be created outside of any transaction.
- assert.commandWorked(db.createCollection(coll.getName()));
- assert.commandWorked(db.createCollection(unwatchedColl.getName()));
-
- //
- // Start transaction 1.
- //
- const session1 = db.getMongo().startSession();
- const sessionDb1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDb1[collName];
- session1.startTransaction({readConcern: {level: "majority"}});
-
- //
- // Start transaction 2.
- //
- const session2 = db.getMongo().startSession();
- const sessionDb2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDb2[collName];
- session2.startTransaction({readConcern: {level: "majority"}});
-
- //
- // Start transaction 3.
- //
- const session3 = db.getMongo().startSession();
- const sessionDb3 = session3.getDatabase(dbName);
- const sessionColl3 = sessionDb3[collName];
- session3.startTransaction({readConcern: {level: "majority"}});
-
- // Open a change stream on the test collection.
- const changeStreamCursor = coll.watch();
-
- // Insert a document and confirm that the change stream has it.
- assert.commandWorked(coll.insert({_id: "no-txn-doc-1"}, {writeConcern: {w: "majority"}}));
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "no-txn-doc-1"}, changeList);
-
- // Insert two documents under each transaction and confirm no change stream updates.
- assert.commandWorked(sessionColl1.insert([{_id: "txn1-doc-1"}, {_id: "txn1-doc-2"}]));
- assert.commandWorked(sessionColl2.insert([{_id: "txn2-doc-1"}, {_id: "txn2-doc-2"}]));
- assertNoChanges(changeStreamCursor);
-
- // Update one document under each transaction and confirm no change stream updates.
- assert.commandWorked(sessionColl1.update({_id: "txn1-doc-1"}, {$set: {"updated": 1}}));
- assert.commandWorked(sessionColl2.update({_id: "txn2-doc-1"}, {$set: {"updated": 1}}));
- assertNoChanges(changeStreamCursor);
+"use strict";
- // Update and then remove the second doc under each transaction and confirm no change stream
- // events are seen.
- assert.commandWorked(
- sessionColl1.update({_id: "txn1-doc-2"}, {$set: {"update-before-delete": 1}}));
- assert.commandWorked(
- sessionColl2.update({_id: "txn2-doc-2"}, {$set: {"update-before-delete": 1}}));
- assert.commandWorked(sessionColl1.remove({_id: "txn1-doc-2"}));
- assert.commandWorked(sessionColl2.remove({_id: "txn2-doc-2"}));
- assertNoChanges(changeStreamCursor);
+load("jstests/core/txns/libs/prepare_helpers.js"); // For PrepareHelpers.
- // Perform a write to the 'session1' transaction in a collection that is not being watched
- // by 'changeStreamCursor'. We do not expect to see this write in the change stream either
- // now or on commit.
- assert.commandWorked(
- sessionDb1[unwatchedColl.getName()].insert({_id: "txn1-doc-unwatched-collection"}));
- assertNoChanges(changeStreamCursor);
+const dbName = "test";
+const collName = "change_stream_transaction";
- // Perform a write to the 'session3' transaction in a collection that is not being watched
- // by 'changeStreamCursor'. We do not expect to see this write in the change stream either
- // now or on commit.
- assert.commandWorked(
- sessionDb3[unwatchedColl.getName()].insert({_id: "txn3-doc-unwatched-collection"}));
- assertNoChanges(changeStreamCursor);
-
- // Perform a write outside of a transaction and confirm that the change stream sees only
- // this write.
- assert.commandWorked(coll.insert({_id: "no-txn-doc-2"}, {writeConcern: {w: "majority"}}));
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "no-txn-doc-2"}, changeList);
- assertNoChanges(changeStreamCursor);
-
- let prepareTimestampTxn1;
- prepareTimestampTxn1 = PrepareHelpers.prepareTransaction(session1);
- assertNoChanges(changeStreamCursor);
-
- assert.commandWorked(coll.insert({_id: "no-txn-doc-3"}, {writeConcern: {w: "majority"}}));
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "no-txn-doc-3"}, changeList);
-
- //
- // Commit first transaction and confirm expected changes.
- //
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestampTxn1));
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "txn1-doc-1"}, changeList);
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "txn1-doc-2"}, changeList);
- assertWriteVisibleWithCapture(
- changeStreamCursor, "update", {_id: "txn1-doc-1"}, changeList);
- assertWriteVisibleWithCapture(
- changeStreamCursor, "update", {_id: "txn1-doc-2"}, changeList);
- assertWriteVisibleWithCapture(
- changeStreamCursor, "delete", {_id: "txn1-doc-2"}, changeList);
- assertNoChanges(changeStreamCursor);
-
- // Transition the second transaction to prepared. We skip capturing the prepare
- // timestamp it is not required for abortTransaction_forTesting().
- PrepareHelpers.prepareTransaction(session2);
- assertNoChanges(changeStreamCursor);
+/**
+ * This test sets an internal parameter in order to force transactions with more than 4
+ * operations to span multiple oplog entries, making it easier to test that scenario.
+ */
+const maxOpsInOplogEntry = 4;
- assert.commandWorked(coll.insert({_id: "no-txn-doc-4"}, {writeConcern: {w: "majority"}}));
- assertWriteVisibleWithCapture(
- changeStreamCursor, "insert", {_id: "no-txn-doc-4"}, changeList);
+/**
+ * Asserts that the expected operation type and documentKey are found on the change stream
+ * cursor. Returns the change stream document.
+ */
+function assertWriteVisible(cursor, operationType, documentKey) {
+ assert.soon(() => cursor.hasNext());
+ const changeDoc = cursor.next();
+ assert.eq(operationType, changeDoc.operationType, changeDoc);
+ assert.eq(documentKey, changeDoc.documentKey, changeDoc);
+ return changeDoc;
+}
+
+/**
+ * Asserts that the expected operation type and documentKey are found on the change stream
+ * cursor. Pushes the corresponding resume token and change stream document to an array.
+ */
+function assertWriteVisibleWithCapture(cursor, operationType, documentKey, changeList) {
+ const changeDoc = assertWriteVisible(cursor, operationType, documentKey);
+ changeList.push(changeDoc);
+}
- //
- // Abort second transaction.
- //
- session2.abortTransaction_forTesting();
+/**
+ * Asserts that there are no changes waiting on the change stream cursor.
+ */
+function assertNoChanges(cursor) {
+ assert(!cursor.hasNext(), () => {
+ return "Unexpected change set: " + tojson(cursor.toArray());
+ });
+}
+
+function runTest(conn) {
+ const db = conn.getDB(dbName);
+ const coll = db.getCollection(collName);
+ const unwatchedColl = db.getCollection(collName + "_unwatched");
+ let changeList = [];
+
+ // Collections must be created outside of any transaction.
+ assert.commandWorked(db.createCollection(coll.getName()));
+ assert.commandWorked(db.createCollection(unwatchedColl.getName()));
+
+ //
+ // Start transaction 1.
+ //
+ const session1 = db.getMongo().startSession();
+ const sessionDb1 = session1.getDatabase(dbName);
+ const sessionColl1 = sessionDb1[collName];
+ session1.startTransaction({readConcern: {level: "majority"}});
+
+ //
+ // Start transaction 2.
+ //
+ const session2 = db.getMongo().startSession();
+ const sessionDb2 = session2.getDatabase(dbName);
+ const sessionColl2 = sessionDb2[collName];
+ session2.startTransaction({readConcern: {level: "majority"}});
+
+ //
+ // Start transaction 3.
+ //
+ const session3 = db.getMongo().startSession();
+ const sessionDb3 = session3.getDatabase(dbName);
+ const sessionColl3 = sessionDb3[collName];
+ session3.startTransaction({readConcern: {level: "majority"}});
+
+ // Open a change stream on the test collection.
+ const changeStreamCursor = coll.watch();
+
+ // Insert a document and confirm that the change stream has it.
+ assert.commandWorked(coll.insert({_id: "no-txn-doc-1"}, {writeConcern: {w: "majority"}}));
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "no-txn-doc-1"}, changeList);
+
+ // Insert two documents under each transaction and confirm no change stream updates.
+ assert.commandWorked(sessionColl1.insert([{_id: "txn1-doc-1"}, {_id: "txn1-doc-2"}]));
+ assert.commandWorked(sessionColl2.insert([{_id: "txn2-doc-1"}, {_id: "txn2-doc-2"}]));
+ assertNoChanges(changeStreamCursor);
+
+ // Update one document under each transaction and confirm no change stream updates.
+ assert.commandWorked(sessionColl1.update({_id: "txn1-doc-1"}, {$set: {"updated": 1}}));
+ assert.commandWorked(sessionColl2.update({_id: "txn2-doc-1"}, {$set: {"updated": 1}}));
+ assertNoChanges(changeStreamCursor);
+
+ // Update and then remove the second doc under each transaction and confirm no change stream
+ // events are seen.
+ assert.commandWorked(
+ sessionColl1.update({_id: "txn1-doc-2"}, {$set: {"update-before-delete": 1}}));
+ assert.commandWorked(
+ sessionColl2.update({_id: "txn2-doc-2"}, {$set: {"update-before-delete": 1}}));
+ assert.commandWorked(sessionColl1.remove({_id: "txn1-doc-2"}));
+ assert.commandWorked(sessionColl2.remove({_id: "txn2-doc-2"}));
+ assertNoChanges(changeStreamCursor);
+
+ // Perform a write to the 'session1' transaction in a collection that is not being watched
+ // by 'changeStreamCursor'. We do not expect to see this write in the change stream either
+ // now or on commit.
+ assert.commandWorked(
+ sessionDb1[unwatchedColl.getName()].insert({_id: "txn1-doc-unwatched-collection"}));
+ assertNoChanges(changeStreamCursor);
+
+ // Perform a write to the 'session3' transaction in a collection that is not being watched
+ // by 'changeStreamCursor'. We do not expect to see this write in the change stream either
+ // now or on commit.
+ assert.commandWorked(
+ sessionDb3[unwatchedColl.getName()].insert({_id: "txn3-doc-unwatched-collection"}));
+ assertNoChanges(changeStreamCursor);
+
+ // Perform a write outside of a transaction and confirm that the change stream sees only
+ // this write.
+ assert.commandWorked(coll.insert({_id: "no-txn-doc-2"}, {writeConcern: {w: "majority"}}));
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "no-txn-doc-2"}, changeList);
+ assertNoChanges(changeStreamCursor);
+
+ let prepareTimestampTxn1;
+ prepareTimestampTxn1 = PrepareHelpers.prepareTransaction(session1);
+ assertNoChanges(changeStreamCursor);
+
+ assert.commandWorked(coll.insert({_id: "no-txn-doc-3"}, {writeConcern: {w: "majority"}}));
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "no-txn-doc-3"}, changeList);
+
+ //
+ // Commit first transaction and confirm expected changes.
+ //
+ assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestampTxn1));
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "txn1-doc-1"}, changeList);
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "txn1-doc-2"}, changeList);
+ assertWriteVisibleWithCapture(changeStreamCursor, "update", {_id: "txn1-doc-1"}, changeList);
+ assertWriteVisibleWithCapture(changeStreamCursor, "update", {_id: "txn1-doc-2"}, changeList);
+ assertWriteVisibleWithCapture(changeStreamCursor, "delete", {_id: "txn1-doc-2"}, changeList);
+ assertNoChanges(changeStreamCursor);
+
+    // Transition the second transaction to prepared. We skip capturing the prepare
+    // timestamp because it is not required for abortTransaction_forTesting().
+ PrepareHelpers.prepareTransaction(session2);
+ assertNoChanges(changeStreamCursor);
+
+ assert.commandWorked(coll.insert({_id: "no-txn-doc-4"}, {writeConcern: {w: "majority"}}));
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", {_id: "no-txn-doc-4"}, changeList);
+
+ //
+ // Abort second transaction.
+ //
+ session2.abortTransaction_forTesting();
+ assertNoChanges(changeStreamCursor);
+
+ //
+ // Start transaction 4.
+ //
+ const session4 = db.getMongo().startSession();
+ const sessionDb4 = session4.getDatabase(dbName);
+ const sessionColl4 = sessionDb4[collName];
+ session4.startTransaction({readConcern: {level: "majority"}});
+
+ // Perform enough writes to fill up one applyOps.
+ const txn4Inserts = Array.from({length: maxOpsInOplogEntry},
+ (_, index) => ({_id: {name: "txn4-doc", index: index}}));
+ txn4Inserts.forEach(function(doc) {
+ sessionColl4.insert(doc);
assertNoChanges(changeStreamCursor);
+ });
- //
- // Start transaction 4.
- //
- const session4 = db.getMongo().startSession();
- const sessionDb4 = session4.getDatabase(dbName);
- const sessionColl4 = sessionDb4[collName];
- session4.startTransaction({readConcern: {level: "majority"}});
-
- // Perform enough writes to fill up one applyOps.
- const txn4Inserts = Array.from({length: maxOpsInOplogEntry},
- (_, index) => ({_id: {name: "txn4-doc", index: index}}));
- txn4Inserts.forEach(function(doc) {
- sessionColl4.insert(doc);
- assertNoChanges(changeStreamCursor);
- });
-
- // Perform enough writes to an unwatched collection to fill up a second applyOps. We
- // specifically want to test the case where a multi-applyOps transaction has no relevant
- // updates in its final applyOps.
- txn4Inserts.forEach(function(doc) {
- assert.commandWorked(sessionDb4[unwatchedColl.getName()].insert(doc));
- assertNoChanges(changeStreamCursor);
- });
-
- //
- // Start transaction 5.
- //
- const session5 = db.getMongo().startSession();
- const sessionDb5 = session5.getDatabase(dbName);
- const sessionColl5 = sessionDb5[collName];
- session5.startTransaction({readConcern: {level: "majority"}});
-
- // Perform enough writes to span 3 applyOps entries.
- const txn5Inserts = Array.from({length: 3 * maxOpsInOplogEntry},
- (_, index) => ({_id: {name: "txn5-doc", index: index}}));
- txn5Inserts.forEach(function(doc) {
- assert.commandWorked(sessionColl5.insert(doc));
- assertNoChanges(changeStreamCursor);
- });
-
- //
- // Prepare and commit transaction 5.
- //
- const prepareTimestampTxn5 = PrepareHelpers.prepareTransaction(session5);
+ // Perform enough writes to an unwatched collection to fill up a second applyOps. We
+ // specifically want to test the case where a multi-applyOps transaction has no relevant
+ // updates in its final applyOps.
+ txn4Inserts.forEach(function(doc) {
+ assert.commandWorked(sessionDb4[unwatchedColl.getName()].insert(doc));
assertNoChanges(changeStreamCursor);
- assert.commandWorked(PrepareHelpers.commitTransaction(session5, prepareTimestampTxn5));
- txn5Inserts.forEach(function(doc) {
- assertWriteVisibleWithCapture(changeStreamCursor, "insert", doc, changeList);
- });
-
- //
- // Commit transaction 4 without preparing.
- //
- session4.commitTransaction();
- txn4Inserts.forEach(function(doc) {
- assertWriteVisibleWithCapture(changeStreamCursor, "insert", doc, changeList);
- });
+ });
+
+ //
+ // Start transaction 5.
+ //
+ const session5 = db.getMongo().startSession();
+ const sessionDb5 = session5.getDatabase(dbName);
+ const sessionColl5 = sessionDb5[collName];
+ session5.startTransaction({readConcern: {level: "majority"}});
+
+ // Perform enough writes to span 3 applyOps entries.
+ const txn5Inserts = Array.from({length: 3 * maxOpsInOplogEntry},
+ (_, index) => ({_id: {name: "txn5-doc", index: index}}));
+ txn5Inserts.forEach(function(doc) {
+ assert.commandWorked(sessionColl5.insert(doc));
assertNoChanges(changeStreamCursor);
-
- changeStreamCursor.close();
-
- // Test that change stream resume returns the expected set of documents at each point
- // captured by this test.
- for (let i = 0; i < changeList.length; ++i) {
- const resumeCursor = coll.watch([], {startAfter: changeList[i]._id});
-
- for (let x = (i + 1); x < changeList.length; ++x) {
- const expectedChangeDoc = changeList[x];
- assertWriteVisible(
- resumeCursor, expectedChangeDoc.operationType, expectedChangeDoc.documentKey);
- }
-
- assertNoChanges(resumeCursor);
- resumeCursor.close();
+ });
+
+ //
+ // Prepare and commit transaction 5.
+ //
+ const prepareTimestampTxn5 = PrepareHelpers.prepareTransaction(session5);
+ assertNoChanges(changeStreamCursor);
+ assert.commandWorked(PrepareHelpers.commitTransaction(session5, prepareTimestampTxn5));
+ txn5Inserts.forEach(function(doc) {
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", doc, changeList);
+ });
+
+ //
+ // Commit transaction 4 without preparing.
+ //
+ session4.commitTransaction();
+ txn4Inserts.forEach(function(doc) {
+ assertWriteVisibleWithCapture(changeStreamCursor, "insert", doc, changeList);
+ });
+ assertNoChanges(changeStreamCursor);
+
+ changeStreamCursor.close();
+
+ // Test that change stream resume returns the expected set of documents at each point
+ // captured by this test.
+ for (let i = 0; i < changeList.length; ++i) {
+ const resumeCursor = coll.watch([], {startAfter: changeList[i]._id});
+
+ for (let x = (i + 1); x < changeList.length; ++x) {
+ const expectedChangeDoc = changeList[x];
+ assertWriteVisible(
+ resumeCursor, expectedChangeDoc.operationType, expectedChangeDoc.documentKey);
}
- //
- // Prepare and commit the third transaction and confirm that there are no visible changes.
- //
- let prepareTimestampTxn3;
- prepareTimestampTxn3 = PrepareHelpers.prepareTransaction(session3);
- assertNoChanges(changeStreamCursor);
-
- assert.commandWorked(PrepareHelpers.commitTransaction(session3, prepareTimestampTxn3));
- assertNoChanges(changeStreamCursor);
-
- assert.commandWorked(db.dropDatabase());
- }
-
- let replSetTestDescription = {nodes: 1};
- if (!jsTest.options().setParameters.hasOwnProperty(
- "maxNumberOfTransactionOperationsInSingleOplogEntry")) {
- // Configure the replica set to use our value for maxOpsInOplogEntry.
- replSetTestDescription.nodeOptions = {
- setParameter: {maxNumberOfTransactionOperationsInSingleOplogEntry: maxOpsInOplogEntry}
- };
- } else {
- // The test is executing in a build variant that already defines its own override value for
- // maxNumberOfTransactionOperationsInSingleOplogEntry. Even though the build variant's
- // choice for this override won't test the same edge cases, the test should still succeed.
+ assertNoChanges(resumeCursor);
+ resumeCursor.close();
}
- const rst = new ReplSetTest(replSetTestDescription);
- rst.startSet();
- rst.initiate();
-
- runTest(rst.getPrimary());
- rst.stopSet();
+ //
+ // Prepare and commit the third transaction and confirm that there are no visible changes.
+ //
+ let prepareTimestampTxn3;
+ prepareTimestampTxn3 = PrepareHelpers.prepareTransaction(session3);
+ assertNoChanges(changeStreamCursor);
+
+ assert.commandWorked(PrepareHelpers.commitTransaction(session3, prepareTimestampTxn3));
+ assertNoChanges(changeStreamCursor);
+
+ assert.commandWorked(db.dropDatabase());
+}
+
+let replSetTestDescription = {nodes: 1};
+if (!jsTest.options().setParameters.hasOwnProperty(
+ "maxNumberOfTransactionOperationsInSingleOplogEntry")) {
+ // Configure the replica set to use our value for maxOpsInOplogEntry.
+ replSetTestDescription.nodeOptions = {
+ setParameter: {maxNumberOfTransactionOperationsInSingleOplogEntry: maxOpsInOplogEntry}
+ };
+} else {
+ // The test is executing in a build variant that already defines its own override value for
+ // maxNumberOfTransactionOperationsInSingleOplogEntry. Even though the build variant's
+ // choice for this override won't test the same edge cases, the test should still succeed.
+}
+const rst = new ReplSetTest(replSetTestDescription);
+rst.startSet();
+rst.initiate();
+
+runTest(rst.getPrimary());
+
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/change_streams_collation_chunk_migration.js b/jstests/noPassthrough/change_streams_collation_chunk_migration.js
index 4be1044d2d9..51d0536900d 100644
--- a/jstests/noPassthrough/change_streams_collation_chunk_migration.js
+++ b/jstests/noPassthrough/change_streams_collation_chunk_migration.js
@@ -4,61 +4,64 @@
* @tags: [requires_replication, requires_journaling]
*/
(function() {
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest'.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest'.
- const st = new ShardingTest({
- shards: 2,
- mongos: 1,
- rs: {
- nodes: 1,
- },
- });
+const st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ rs: {
+ nodes: 1,
+ },
+});
- const testDB = st.s.getDB(jsTestName());
+const testDB = st.s.getDB(jsTestName());
- // Enable sharding on the test database and ensure that the primary is shard0.
- assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
- st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
+// Enable sharding on the test database and ensure that the primary is shard0.
+assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
+st.ensurePrimaryShard(testDB.getName(), st.shard0.shardName);
- const caseInsensitiveCollectionName = "change_stream_case_insensitive";
- const caseInsensitive = {locale: "en_US", strength: 2};
+const caseInsensitiveCollectionName = "change_stream_case_insensitive";
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
- // Create the collection with a case-insensitive collation, then shard it on {shardKey: 1}.
- const caseInsensitiveCollection = assertDropAndRecreateCollection(
- testDB, caseInsensitiveCollectionName, {collation: caseInsensitive});
- assert.commandWorked(
- caseInsensitiveCollection.createIndex({shardKey: 1}, {collation: {locale: "simple"}}));
- assert.commandWorked(testDB.adminCommand({
- shardCollection: caseInsensitiveCollection.getFullName(),
- key: {shardKey: 1},
- collation: {locale: "simple"}
- }));
+// Create the collection with a case-insensitive collation, then shard it on {shardKey: 1}.
+const caseInsensitiveCollection = assertDropAndRecreateCollection(
+ testDB, caseInsensitiveCollectionName, {collation: caseInsensitive});
+assert.commandWorked(
+ caseInsensitiveCollection.createIndex({shardKey: 1}, {collation: {locale: "simple"}}));
+assert.commandWorked(testDB.adminCommand({
+ shardCollection: caseInsensitiveCollection.getFullName(),
+ key: {shardKey: 1},
+ collation: {locale: "simple"}
+}));
- // Verify that the collection does not exist on shard1.
- assert(!st.shard1.getCollection(caseInsensitiveCollection.getFullName()).exists());
+// Verify that the collection does not exist on shard1.
+assert(!st.shard1.getCollection(caseInsensitiveCollection.getFullName()).exists());
- // Now open a change stream on the collection.
- const cst = new ChangeStreamTest(testDB);
- const csCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey.shardKey"}}],
- collection: caseInsensitiveCollection
- });
+// Now open a change stream on the collection.
+const cst = new ChangeStreamTest(testDB);
+const csCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey.shardKey"}}],
+ collection: caseInsensitiveCollection
+});
- // Insert some documents into the collection.
- assert.commandWorked(caseInsensitiveCollection.insert({shardKey: 0, text: "aBc"}));
- assert.commandWorked(caseInsensitiveCollection.insert({shardKey: 1, text: "abc"}));
+// Insert some documents into the collection.
+assert.commandWorked(caseInsensitiveCollection.insert({shardKey: 0, text: "aBc"}));
+assert.commandWorked(caseInsensitiveCollection.insert({shardKey: 1, text: "abc"}));
- // Move a chunk from shard0 to shard1. This will create the collection on shard1.
- assert.commandWorked(testDB.adminCommand({
- moveChunk: caseInsensitiveCollection.getFullName(),
- find: {shardKey: 1},
- to: st.rs1.getURL(),
- _waitForDelete: false
- }));
+// Move a chunk from shard0 to shard1. This will create the collection on shard1.
+assert.commandWorked(testDB.adminCommand({
+ moveChunk: caseInsensitiveCollection.getFullName(),
+ find: {shardKey: 1},
+ to: st.rs1.getURL(),
+ _waitForDelete: false
+}));
- // Attempt to read from the change stream. We should see both inserts, without an invalidation.
- cst.assertNextChangesEqual({cursor: csCursor, expectedChanges: [{docId: 0}, {docId: 1}]});
+// Attempt to read from the change stream. We should see both inserts, without an invalidation.
+cst.assertNextChangesEqual({cursor: csCursor, expectedChanges: [{docId: 0}, {docId: 1}]});
- st.stop();
+st.stop();
})();
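The pipeline in the test above appends a $project stage after $changeStream; stages such as $match and $project can be appended in this way to filter or reshape events. A minimal sketch of that pattern, with an illustrative collection and projected field that are not taken from the test:

    const cursor = testColl.aggregate([
        {$changeStream: {}},                      // must be the first stage of the pipeline
        {$project: {docId: "$documentKey._id"}}   // reshape each change event before it is returned
    ]);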
diff --git a/jstests/noPassthrough/change_streams_require_majority_read_concern.js b/jstests/noPassthrough/change_streams_require_majority_read_concern.js
index 8481ba586f1..6fdc4c2ee37 100644
--- a/jstests/noPassthrough/change_streams_require_majority_read_concern.js
+++ b/jstests/noPassthrough/change_streams_require_majority_read_concern.js
@@ -1,97 +1,96 @@
// Tests that the $changeStream requires read concern majority.
// @tags: [uses_change_streams]
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/namespace_utils.js"); // For getCollectionNameFromFullNamespace.
- load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/namespace_utils.js"); // For getCollectionNameFromFullNamespace.
+load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
- const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
+const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
- rst.initiate();
+rst.initiate();
- const name = "change_stream_require_majority_read_concern";
- const db = rst.getPrimary().getDB(name);
+const name = "change_stream_require_majority_read_concern";
+const db = rst.getPrimary().getDB(name);
- // Use ChangeStreamTest to verify that the pipeline returns expected results.
- const cst = new ChangeStreamTest(db);
+// Use ChangeStreamTest to verify that the pipeline returns expected results.
+const cst = new ChangeStreamTest(db);
- // Attempts to get a document from the cursor with awaitData disabled, and asserts if a
- // document is present.
- function assertNextBatchIsEmpty(cursor) {
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "disableAwaitDataForGetMoreCmd", mode: "alwaysOn"}));
- let res = assert.commandWorked(db.runCommand({
- getMore: cursor.id,
- collection: getCollectionNameFromFullNamespace(cursor.ns),
- batchSize: 1
- }));
- assert.eq(res.cursor.nextBatch.length, 0);
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "disableAwaitDataForGetMoreCmd", mode: "off"}));
- }
+// Attempts to get a document from the cursor with awaitData disabled, and asserts if a
+// document is present.
+function assertNextBatchIsEmpty(cursor) {
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "disableAwaitDataForGetMoreCmd", mode: "alwaysOn"}));
+ let res = assert.commandWorked(db.runCommand({
+ getMore: cursor.id,
+ collection: getCollectionNameFromFullNamespace(cursor.ns),
+ batchSize: 1
+ }));
+ assert.eq(res.cursor.nextBatch.length, 0);
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "disableAwaitDataForGetMoreCmd", mode: "off"}));
+}
- // Test read concerns other than "majority" are not supported.
- const primaryColl = db.foo;
- assert.writeOK(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- let res = primaryColl.runCommand({
- aggregate: primaryColl.getName(),
- pipeline: [{$changeStream: {}}],
- cursor: {},
- readConcern: {level: "local"},
- });
- assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
- res = primaryColl.runCommand({
- aggregate: primaryColl.getName(),
- pipeline: [{$changeStream: {}}],
- cursor: {},
- readConcern: {level: "linearizable"},
- });
- assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+// Test read concerns other than "majority" are not supported.
+const primaryColl = db.foo;
+assert.writeOK(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+let res = primaryColl.runCommand({
+ aggregate: primaryColl.getName(),
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ readConcern: {level: "local"},
+});
+assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
+res = primaryColl.runCommand({
+ aggregate: primaryColl.getName(),
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ readConcern: {level: "linearizable"},
+});
+assert.commandFailedWithCode(res, ErrorCodes.InvalidOptions);
- // Test that explicit read concern "majority" works.
- res = primaryColl.runCommand({
- aggregate: primaryColl.getName(),
- pipeline: [{$changeStream: {}}],
- cursor: {},
- readConcern: {level: "majority"},
- });
- assert.commandWorked(res);
+// Test that explicit read concern "majority" works.
+res = primaryColl.runCommand({
+ aggregate: primaryColl.getName(),
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ readConcern: {level: "majority"},
+});
+assert.commandWorked(res);
- // Test not specifying readConcern defaults to "majority" read concern.
- stopReplicationOnSecondaries(rst);
- // Verify that the document just inserted cannot be returned.
- let cursor =
- cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: primaryColl});
- assert.eq(cursor.firstBatch.length, 0);
+// Test not specifying readConcern defaults to "majority" read concern.
+stopReplicationOnSecondaries(rst);
+// Verify that the document just inserted cannot be returned.
+let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: primaryColl});
+assert.eq(cursor.firstBatch.length, 0);
- // Insert a document on the primary only.
- assert.writeOK(primaryColl.insert({_id: 2}, {writeConcern: {w: 1}}));
- assertNextBatchIsEmpty(cursor);
+// Insert a document on the primary only.
+assert.writeOK(primaryColl.insert({_id: 2}, {writeConcern: {w: 1}}));
+assertNextBatchIsEmpty(cursor);
- // Restart data replicaiton and wait until the new write becomes visible.
- restartReplicationOnSecondaries(rst);
- rst.awaitLastOpCommitted();
+// Restart data replication and wait until the new write becomes visible.
+restartReplicationOnSecondaries(rst);
+rst.awaitLastOpCommitted();
- // Verify that the expected doc is returned because it has been committed.
- let doc = cst.getOneChange(cursor);
- assert.docEq(doc.operationType, "insert");
- assert.docEq(doc.fullDocument, {_id: 2});
- rst.stopSet();
+// Verify that the expected doc is returned because it has been committed.
+let doc = cst.getOneChange(cursor);
+assert.docEq(doc.operationType, "insert");
+assert.docEq(doc.fullDocument, {_id: 2});
+rst.stopSet();
}());
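The same default-read-concern behaviour can also be observed through the shell's watch() helper rather than a raw aggregate command. A minimal sketch, assuming the replica set from the test above at the point where replication on the secondaries is still stopped and the {_id: 2} insert is not yet majority-committed:

    const helperCursor = primaryColl.watch();  // no readConcern supplied, so the default applies
    assert(!helperCursor.hasNext());           // the un-replicated insert remains invisible
    helperCursor.close();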
diff --git a/jstests/noPassthrough/change_streams_required_privileges.js b/jstests/noPassthrough/change_streams_required_privileges.js
index 71ccd81758e..137896a3f8f 100644
--- a/jstests/noPassthrough/change_streams_required_privileges.js
+++ b/jstests/noPassthrough/change_streams_required_privileges.js
@@ -2,341 +2,331 @@
// This test uses the WiredTiger storage engine, which does not support running without journaling.
// @tags: [requires_replication,requires_journaling]
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- const rst = new ReplSetTest({nodes: 1});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
- const password = "test_password";
- rst.getPrimary().getDB("admin").createUser(
- {user: "userAdmin", pwd: password, roles: [{db: "admin", role: "userAdminAnyDatabase"}]});
- rst.restart(0, {auth: ''});
+const rst = new ReplSetTest({nodes: 1});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
+const password = "test_password";
+rst.getPrimary().getDB("admin").createUser(
+ {user: "userAdmin", pwd: password, roles: [{db: "admin", role: "userAdminAnyDatabase"}]});
+rst.restart(0, {auth: ''});
- const db = rst.getPrimary().getDB("test");
- const coll = db.coll;
- const adminDB = db.getSiblingDB("admin");
+const db = rst.getPrimary().getDB("test");
+const coll = db.coll;
+const adminDB = db.getSiblingDB("admin");
- // Wrap different sections of the test in separate functions to make the scoping clear.
- (function createRoles() {
- assert(adminDB.auth("userAdmin", password));
- // Create some collection-level roles.
- db.createRole({
- role: "write",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: coll.getName()},
- actions: ["insert", "update", "remove"]
- }]
- });
- db.createRole({
- role: "find_only",
- roles: [],
- privileges:
- [{resource: {db: db.getName(), collection: coll.getName()}, actions: ["find"]}]
- });
- db.createRole({
- role: "find_and_change_stream",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: coll.getName()},
- actions: ["find", "changeStream"]
- }]
- });
- db.createRole({
- role: "change_stream_only",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: coll.getName()},
- actions: ["changeStream"]
- }]
- });
+// Wrap different sections of the test in separate functions to make the scoping clear.
+(function createRoles() {
+ assert(adminDB.auth("userAdmin", password));
+ // Create some collection-level roles.
+ db.createRole({
+ role: "write",
+ roles: [],
+ privileges: [{
+ resource: {db: db.getName(), collection: coll.getName()},
+ actions: ["insert", "update", "remove"]
+ }]
+ });
+ db.createRole({
+ role: "find_only",
+ roles: [],
+ privileges: [{resource: {db: db.getName(), collection: coll.getName()}, actions: ["find"]}]
+ });
+ db.createRole({
+ role: "find_and_change_stream",
+ roles: [],
+ privileges: [{
+ resource: {db: db.getName(), collection: coll.getName()},
+ actions: ["find", "changeStream"]
+ }]
+ });
+ db.createRole({
+ role: "change_stream_only",
+ roles: [],
+ privileges:
+ [{resource: {db: db.getName(), collection: coll.getName()}, actions: ["changeStream"]}]
+ });
- // Create some privileges at the database level.
- db.createRole({
- role: "db_write",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: ""},
- actions: ["insert", "update", "remove"]
- }]
- });
- db.createRole({
- role: "db_find_only",
- roles: [],
- privileges: [{resource: {db: db.getName(), collection: ""}, actions: ["find"]}]
- });
- db.createRole({
- role: "db_find_and_change_stream",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: ""},
- actions: ["find", "changeStream"]
- }]
- });
- db.createRole({
- role: "db_change_stream_only",
- roles: [],
- privileges:
- [{resource: {db: db.getName(), collection: ""}, actions: ["changeStream"]}]
- });
+ // Create some privileges at the database level.
+ db.createRole({
+ role: "db_write",
+ roles: [],
+ privileges: [
+ {resource: {db: db.getName(), collection: ""},
+ actions: ["insert", "update", "remove"]}
+ ]
+ });
+ db.createRole({
+ role: "db_find_only",
+ roles: [],
+ privileges: [{resource: {db: db.getName(), collection: ""}, actions: ["find"]}]
+ });
+ db.createRole({
+ role: "db_find_and_change_stream",
+ roles: [],
+ privileges:
+ [{resource: {db: db.getName(), collection: ""}, actions: ["find", "changeStream"]}]
+ });
+ db.createRole({
+ role: "db_change_stream_only",
+ roles: [],
+ privileges: [{resource: {db: db.getName(), collection: ""}, actions: ["changeStream"]}]
+ });
- // Create some privileges at the admin database level.
- adminDB.createRole({
- role: "admin_db_write",
- roles: [],
- privileges: [{
- resource: {db: db.getName(), collection: ""},
- actions: ["insert", "update", "remove"]
- }]
- });
- adminDB.createRole({
- role: "admin_db_find_only",
- roles: [],
- privileges: [{resource: {db: "admin", collection: ""}, actions: ["find"]}]
- });
- adminDB.createRole({
- role: "admin_db_find_and_change_stream",
- roles: [],
- privileges:
- [{resource: {db: "admin", collection: ""}, actions: ["find", "changeStream"]}]
- });
- adminDB.createRole({
- role: "admin_db_change_stream_only",
- roles: [],
- privileges: [{resource: {db: "admin", collection: ""}, actions: ["changeStream"]}]
- });
+ // Create some privileges at the admin database level.
+ adminDB.createRole({
+ role: "admin_db_write",
+ roles: [],
+ privileges: [
+ {resource: {db: db.getName(), collection: ""},
+ actions: ["insert", "update", "remove"]}
+ ]
+ });
+ adminDB.createRole({
+ role: "admin_db_find_only",
+ roles: [],
+ privileges: [{resource: {db: "admin", collection: ""}, actions: ["find"]}]
+ });
+ adminDB.createRole({
+ role: "admin_db_find_and_change_stream",
+ roles: [],
+ privileges: [{resource: {db: "admin", collection: ""}, actions: ["find", "changeStream"]}]
+ });
+ adminDB.createRole({
+ role: "admin_db_change_stream_only",
+ roles: [],
+ privileges: [{resource: {db: "admin", collection: ""}, actions: ["changeStream"]}]
+ });
- // Create some roles at the any-db, any-collection level.
- adminDB.createRole({
- role: "any_db_find_only",
- roles: [],
- privileges: [{resource: {db: "", collection: ""}, actions: ["find"]}]
- });
- adminDB.createRole({
- role: "any_db_find_and_change_stream",
- roles: [],
- privileges: [{resource: {db: "", collection: ""}, actions: ["find", "changeStream"]}]
- });
- adminDB.createRole({
- role: "any_db_change_stream_only",
- roles: [],
- privileges: [{resource: {db: "", collection: ""}, actions: ["changeStream"]}]
- });
+ // Create some roles at the any-db, any-collection level.
+ adminDB.createRole({
+ role: "any_db_find_only",
+ roles: [],
+ privileges: [{resource: {db: "", collection: ""}, actions: ["find"]}]
+ });
+ adminDB.createRole({
+ role: "any_db_find_and_change_stream",
+ roles: [],
+ privileges: [{resource: {db: "", collection: ""}, actions: ["find", "changeStream"]}]
+ });
+ adminDB.createRole({
+ role: "any_db_change_stream_only",
+ roles: [],
+ privileges: [{resource: {db: "", collection: ""}, actions: ["changeStream"]}]
+ });
- // Create some roles at the cluster level.
- adminDB.createRole({
- role: "cluster_find_only",
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ["find"]}]
- });
- adminDB.createRole({
- role: "cluster_find_and_change_stream",
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ["find", "changeStream"]}]
- });
- adminDB.createRole({
- role: "cluster_change_stream_only",
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ["changeStream"]}]
- });
- }());
+ // Create some roles at the cluster level.
+ adminDB.createRole({
+ role: "cluster_find_only",
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ["find"]}]
+ });
+ adminDB.createRole({
+ role: "cluster_find_and_change_stream",
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ["find", "changeStream"]}]
+ });
+ adminDB.createRole({
+ role: "cluster_change_stream_only",
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ["changeStream"]}]
+ });
+}());
- (function createUsers() {
- // Create some users for a specific collection. Use the name of the role as the name of the
- // user.
- for (let role of["write", "find_only", "find_and_change_stream", "change_stream_only"]) {
- db.createUser({user: role, pwd: password, roles: [role]});
- }
+(function createUsers() {
+ // Create some users for a specific collection. Use the name of the role as the name of the
+ // user.
+ for (let role of ["write", "find_only", "find_and_change_stream", "change_stream_only"]) {
+ db.createUser({user: role, pwd: password, roles: [role]});
+ }
- // Create some users at the database level. Use the name of the role as the name of the
- // user, except for the built-in roles.
- for (let role of["db_write",
- "db_find_only",
- "db_find_and_change_stream",
- "db_change_stream_only"]) {
- db.createUser({user: role, pwd: password, roles: [role]});
- }
- db.createUser({user: "db_read", pwd: password, roles: ["read"]});
+ // Create some users at the database level. Use the name of the role as the name of the
+ // user, except for the built-in roles.
+ for (let role of
+ ["db_write", "db_find_only", "db_find_and_change_stream", "db_change_stream_only"]) {
+ db.createUser({user: role, pwd: password, roles: [role]});
+ }
+ db.createUser({user: "db_read", pwd: password, roles: ["read"]});
- // Create some users on the admin database. Use the name of the role as the name of the
- // user, except for the built-in roles.
- for (let role of["admin_db_write",
- "admin_db_find_only",
- "admin_db_find_and_change_stream",
- "admin_db_change_stream_only"]) {
- adminDB.createUser({user: role, pwd: password, roles: [role]});
- }
- adminDB.createUser({user: "admin_db_read", pwd: password, roles: ["read"]});
+ // Create some users on the admin database. Use the name of the role as the name of the
+ // user, except for the built-in roles.
+ for (let role of ["admin_db_write",
+ "admin_db_find_only",
+ "admin_db_find_and_change_stream",
+ "admin_db_change_stream_only"]) {
+ adminDB.createUser({user: role, pwd: password, roles: [role]});
+ }
+ adminDB.createUser({user: "admin_db_read", pwd: password, roles: ["read"]});
- // Create some users with privileges on all databases. Use the name of the role as the name
- // of the user, except for the built-in roles.
- for (let role of["any_db_find_only",
- "any_db_find_and_change_stream",
- "any_db_change_stream_only"]) {
- adminDB.createUser({user: role, pwd: password, roles: [role]});
- }
+ // Create some users with privileges on all databases. Use the name of the role as the name
+ // of the user, except for the built-in roles.
+ for (let role of ["any_db_find_only",
+ "any_db_find_and_change_stream",
+ "any_db_change_stream_only"]) {
+ adminDB.createUser({user: role, pwd: password, roles: [role]});
+ }
- // Create some users on the whole cluster. Use the name of the role as the name of the user.
- for (let role of["cluster_find_only",
- "cluster_find_and_change_stream",
- "cluster_change_stream_only"]) {
- adminDB.createUser({user: role, pwd: password, roles: [role]});
- }
- }());
+ // Create some users on the whole cluster. Use the name of the role as the name of the user.
+ for (let role of ["cluster_find_only",
+ "cluster_find_and_change_stream",
+ "cluster_change_stream_only"]) {
+ adminDB.createUser({user: role, pwd: password, roles: [role]});
+ }
+}());
- (function testPrivilegesForSingleCollection() {
- // Test that users without the required privileges cannot open a change stream. A user
- // needs both the 'find' and 'changeStream' action on the collection. Note in particular
- // that the whole-cluster privileges (specified with {cluster: true}) is not enough to open
- // a change stream on any particular collection.
- for (let userWithoutPrivileges of[{db: db, name: "find_only"},
- {db: db, name: "change_stream_only"},
- {db: db, name: "write"},
- {db: db, name: "db_find_only"},
- {db: db, name: "db_change_stream_only"},
- {db: db, name: "db_write"},
- {db: adminDB, name: "admin_db_find_only"},
- {db: adminDB, name: "admin_db_find_and_change_stream"},
- {db: adminDB, name: "admin_db_change_stream_only"},
- {db: adminDB, name: "admin_db_read"},
- {db: adminDB, name: "any_db_find_only"},
- {db: adminDB, name: "any_db_change_stream_only"},
- {db: adminDB, name: "cluster_find_only"},
- {db: adminDB, name: "cluster_find_and_change_stream"},
- {db: adminDB, name: "cluster_change_stream_only"}]) {
- jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream ` +
- `on a collection`);
- const db = userWithoutPrivileges.db;
- assert(db.auth(userWithoutPrivileges.name, password));
+(function testPrivilegesForSingleCollection() {
+    // Test that users without the required privileges cannot open a change stream. A user
+    // needs both the 'find' and 'changeStream' actions on the collection. Note in particular
+    // that whole-cluster privileges (specified with {cluster: true}) are not enough to open
+    // a change stream on any particular collection.
+ for (let userWithoutPrivileges of [{db: db, name: "find_only"},
+ {db: db, name: "change_stream_only"},
+ {db: db, name: "write"},
+ {db: db, name: "db_find_only"},
+ {db: db, name: "db_change_stream_only"},
+ {db: db, name: "db_write"},
+ {db: adminDB, name: "admin_db_find_only"},
+ {db: adminDB, name: "admin_db_find_and_change_stream"},
+ {db: adminDB, name: "admin_db_change_stream_only"},
+ {db: adminDB, name: "admin_db_read"},
+ {db: adminDB, name: "any_db_find_only"},
+ {db: adminDB, name: "any_db_change_stream_only"},
+ {db: adminDB, name: "cluster_find_only"},
+ {db: adminDB, name: "cluster_find_and_change_stream"},
+ {db: adminDB, name: "cluster_change_stream_only"}]) {
+ jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream ` +
+ `on a collection`);
+ const db = userWithoutPrivileges.db;
+ assert(db.auth(userWithoutPrivileges.name, password));
- assert.commandFailedWithCode(
- coll.getDB().runCommand(
- {aggregate: coll.getName(), pipeline: [{$changeStream: {}}], cursor: {}}),
- ErrorCodes.Unauthorized);
+ assert.commandFailedWithCode(
+ coll.getDB().runCommand(
+ {aggregate: coll.getName(), pipeline: [{$changeStream: {}}], cursor: {}}),
+ ErrorCodes.Unauthorized);
- db.logout();
- }
+ db.logout();
+ }
- // Test that a user with the required privileges can open a change stream.
- for (let userWithPrivileges of[{db: db, name: "find_and_change_stream"},
- {db: db, name: "db_find_and_change_stream"},
- {db: db, name: "db_read"},
- {db: adminDB, name: "any_db_find_and_change_stream"}]) {
- jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream on a` +
- ` collection`);
- const db = userWithPrivileges.db;
- assert(db.auth(userWithPrivileges.name, password));
+ // Test that a user with the required privileges can open a change stream.
+ for (let userWithPrivileges of [{db: db, name: "find_and_change_stream"},
+ {db: db, name: "db_find_and_change_stream"},
+ {db: db, name: "db_read"},
+ {db: adminDB, name: "any_db_find_and_change_stream"}]) {
+ jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream on a` +
+ ` collection`);
+ const db = userWithPrivileges.db;
+ assert(db.auth(userWithPrivileges.name, password));
- assert.doesNotThrow(() => coll.watch());
+ assert.doesNotThrow(() => coll.watch());
- db.logout();
- }
- }());
+ db.logout();
+ }
+}());
- (function testPrivilegesForWholeDB() {
- // Test that users without the required privileges cannot open a change stream. A user needs
- // both the 'find' and 'changeStream' action on the database. Note in particular that the
- // whole-cluster privileges (specified with {cluster: true}) is not enough to open a change
- // stream on the whole database.
- for (let userWithoutPrivileges of[{db: db, name: "find_only"},
- {db: db, name: "change_stream_only"},
- {db: db, name: "find_and_change_stream"},
- {db: db, name: "write"},
- {db: db, name: "db_find_only"},
- {db: db, name: "db_change_stream_only"},
- {db: db, name: "db_write"},
- {db: adminDB, name: "admin_db_find_only"},
- {db: adminDB, name: "admin_db_find_and_change_stream"},
- {db: adminDB, name: "admin_db_change_stream_only"},
- {db: adminDB, name: "admin_db_read"},
- {db: adminDB, name: "any_db_find_only"},
- {db: adminDB, name: "any_db_change_stream_only"},
- {db: adminDB, name: "cluster_find_only"},
- {db: adminDB, name: "cluster_find_and_change_stream"},
- {db: adminDB, name: "cluster_change_stream_only"}]) {
- jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream` +
- ` on the whole database`);
- const db = userWithoutPrivileges.db;
- assert(db.auth(userWithoutPrivileges.name, password));
+(function testPrivilegesForWholeDB() {
+    // Test that users without the required privileges cannot open a change stream. A user needs
+    // both the 'find' and 'changeStream' actions on the database. Note in particular that
+    // whole-cluster privileges (specified with {cluster: true}) are not enough to open a change
+    // stream on the whole database.
+ for (let userWithoutPrivileges of [{db: db, name: "find_only"},
+ {db: db, name: "change_stream_only"},
+ {db: db, name: "find_and_change_stream"},
+ {db: db, name: "write"},
+ {db: db, name: "db_find_only"},
+ {db: db, name: "db_change_stream_only"},
+ {db: db, name: "db_write"},
+ {db: adminDB, name: "admin_db_find_only"},
+ {db: adminDB, name: "admin_db_find_and_change_stream"},
+ {db: adminDB, name: "admin_db_change_stream_only"},
+ {db: adminDB, name: "admin_db_read"},
+ {db: adminDB, name: "any_db_find_only"},
+ {db: adminDB, name: "any_db_change_stream_only"},
+ {db: adminDB, name: "cluster_find_only"},
+ {db: adminDB, name: "cluster_find_and_change_stream"},
+ {db: adminDB, name: "cluster_change_stream_only"}]) {
+ jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream` +
+ ` on the whole database`);
+ const db = userWithoutPrivileges.db;
+ assert(db.auth(userWithoutPrivileges.name, password));
- assert.commandFailedWithCode(
- coll.getDB().runCommand(
- {aggregate: 1, pipeline: [{$changeStream: {}}], cursor: {}}),
- ErrorCodes.Unauthorized);
+ assert.commandFailedWithCode(
+ coll.getDB().runCommand({aggregate: 1, pipeline: [{$changeStream: {}}], cursor: {}}),
+ ErrorCodes.Unauthorized);
- db.logout();
- }
+ db.logout();
+ }
- // Test that a user with the required privileges can open a change stream.
- for (let userWithPrivileges of[{db: db, name: "db_find_and_change_stream"},
- {db: db, name: "db_read"},
- {db: adminDB, name: "any_db_find_and_change_stream"}]) {
- jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream on` +
- ` the whole database`);
- const db = userWithPrivileges.db;
- assert(db.auth(userWithPrivileges.name, password));
+ // Test that a user with the required privileges can open a change stream.
+ for (let userWithPrivileges of [{db: db, name: "db_find_and_change_stream"},
+ {db: db, name: "db_read"},
+ {db: adminDB, name: "any_db_find_and_change_stream"}]) {
+ jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream on` +
+ ` the whole database`);
+ const db = userWithPrivileges.db;
+ assert(db.auth(userWithPrivileges.name, password));
- assert.doesNotThrow(() => coll.getDB().watch());
+ assert.doesNotThrow(() => coll.getDB().watch());
- db.logout();
- }
- }());
+ db.logout();
+ }
+}());
- (function testPrivilegesForWholeCluster() {
- // Test that users without the required privileges cannot open a change stream. A user needs
- // both the 'find' and 'changeStream' action on _any_ resource. Note in particular that the
- // whole-cluster privileges (specified with {cluster: true}) is not enough to open a change
- // stream on the whole cluster.
- for (let userWithoutPrivileges of[{db: db, name: "find_only"},
- {db: db, name: "change_stream_only"},
- {db: db, name: "find_and_change_stream"},
- {db: db, name: "write"},
- {db: db, name: "db_find_only"},
- {db: db, name: "db_find_and_change_stream"},
- {db: db, name: "db_change_stream_only"},
- {db: db, name: "db_read"},
- {db: db, name: "db_write"},
- {db: adminDB, name: "admin_db_find_only"},
- {db: adminDB, name: "admin_db_find_and_change_stream"},
- {db: adminDB, name: "admin_db_change_stream_only"},
- {db: adminDB, name: "admin_db_read"},
- {db: adminDB, name: "any_db_find_only"},
- {db: adminDB, name: "any_db_change_stream_only"},
- {db: adminDB, name: "cluster_find_only"},
- {db: adminDB, name: "cluster_change_stream_only"},
- {db: adminDB, name: "cluster_find_and_change_stream"}]) {
- jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream` +
- ` on the whole cluster`);
- const db = userWithoutPrivileges.db;
- assert(db.auth(userWithoutPrivileges.name, password));
+(function testPrivilegesForWholeCluster() {
+    // Test that users without the required privileges cannot open a change stream. A user needs
+    // both the 'find' and 'changeStream' actions on _any_ resource. Note in particular that
+    // whole-cluster privileges (specified with {cluster: true}) are not enough to open a change
+    // stream on the whole cluster.
+ for (let userWithoutPrivileges of [{db: db, name: "find_only"},
+ {db: db, name: "change_stream_only"},
+ {db: db, name: "find_and_change_stream"},
+ {db: db, name: "write"},
+ {db: db, name: "db_find_only"},
+ {db: db, name: "db_find_and_change_stream"},
+ {db: db, name: "db_change_stream_only"},
+ {db: db, name: "db_read"},
+ {db: db, name: "db_write"},
+ {db: adminDB, name: "admin_db_find_only"},
+ {db: adminDB, name: "admin_db_find_and_change_stream"},
+ {db: adminDB, name: "admin_db_change_stream_only"},
+ {db: adminDB, name: "admin_db_read"},
+ {db: adminDB, name: "any_db_find_only"},
+ {db: adminDB, name: "any_db_change_stream_only"},
+ {db: adminDB, name: "cluster_find_only"},
+ {db: adminDB, name: "cluster_change_stream_only"},
+ {db: adminDB, name: "cluster_find_and_change_stream"}]) {
+ jsTestLog(`Testing user ${tojson(userWithoutPrivileges)} cannot open a change stream` +
+ ` on the whole cluster`);
+ const db = userWithoutPrivileges.db;
+ assert(db.auth(userWithoutPrivileges.name, password));
- assert.commandFailedWithCode(adminDB.runCommand({
- aggregate: 1,
- pipeline: [{$changeStream: {allChangesForCluster: true}}],
- cursor: {}
- }),
- ErrorCodes.Unauthorized);
+ assert.commandFailedWithCode(adminDB.runCommand({
+ aggregate: 1,
+ pipeline: [{$changeStream: {allChangesForCluster: true}}],
+ cursor: {}
+ }),
+ ErrorCodes.Unauthorized);
- db.logout();
- }
+ db.logout();
+ }
- // Test that a user with the required privileges can open a change stream.
- for (let userWithPrivileges of[{db: adminDB, name: "any_db_find_and_change_stream"}]) {
- jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream` +
- ` on the whole cluster`);
- const db = userWithPrivileges.db;
- assert(db.auth(userWithPrivileges.name, password));
+ // Test that a user with the required privileges can open a change stream.
+ for (let userWithPrivileges of [{db: adminDB, name: "any_db_find_and_change_stream"}]) {
+ jsTestLog(`Testing user ${tojson(userWithPrivileges)} _can_ open a change stream` +
+ ` on the whole cluster`);
+ const db = userWithPrivileges.db;
+ assert(db.auth(userWithPrivileges.name, password));
- assert.doesNotThrow(() => db.getMongo().watch());
+ assert.doesNotThrow(() => db.getMongo().watch());
- db.logout();
- }
- }());
- rst.stopSet();
+ db.logout();
+ }
+}());
+rst.stopSet();
}());
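The collection-level cases above reduce to a single grant: a role carrying both the find and changeStream actions on the target collection is sufficient to open a stream on it. A minimal sketch of that least-privilege setup, with illustrative role, user, and collection names:

    db.createRole({
        role: "watch_coll",
        roles: [],
        privileges:
            [{resource: {db: "test", collection: "coll"}, actions: ["find", "changeStream"]}]
    });
    db.createUser({user: "watcher", pwd: "pwd", roles: ["watch_coll"]});
    assert(db.auth("watcher", "pwd"));
    assert.doesNotThrow(() => db.coll.watch());  // both actions are present, so the stream opens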
diff --git a/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js b/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js
index 73cb523ce49..1dd5bdd83ed 100644
--- a/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js
+++ b/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js
@@ -5,65 +5,64 @@
* @tags: [requires_replication, requires_journaling, requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- const st =
- new ShardingTest({shards: 2, rs: {nodes: 1, setParameter: {writePeriodicNoops: false}}});
+const st = new ShardingTest({shards: 2, rs: {nodes: 1, setParameter: {writePeriodicNoops: false}}});
- const mongosDB = st.s.startSession({causalConsistency: true}).getDatabase(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = st.s.startSession({causalConsistency: true}).getDatabase(jsTestName());
+const mongosColl = mongosDB.test;
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- // Shard on {_id:1}, split at {_id:0}, and move the upper chunk to shard1.
- st.shardColl(mongosColl, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName(), true);
+// Shard on {_id:1}, split at {_id:0}, and move the upper chunk to shard1.
+st.shardColl(mongosColl, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName(), true);
- // Write one document to each shard.
- assert.commandWorked(mongosColl.insert({_id: -10}));
- assert.commandWorked(mongosColl.insert({_id: 10}));
+// Write one document to each shard.
+assert.commandWorked(mongosColl.insert({_id: -10}));
+assert.commandWorked(mongosColl.insert({_id: 10}));
- // Open a change stream cursor to listen for subsequent events.
- let csCursor = mongosColl.watch([], {cursor: {batchSize: 1}});
+// Open a change stream cursor to listen for subsequent events.
+let csCursor = mongosColl.watch([], {cursor: {batchSize: 1}});
- // Update both documents in the collection, such that the events are likely to have the same
- // clusterTime. We update twice to ensure that the PBRT for both shards moves past the first two
- // updates.
- assert.commandWorked(mongosColl.update({}, {$set: {updated: 1}}, {multi: true}));
- assert.commandWorked(mongosColl.update({}, {$set: {updatedAgain: 1}}, {multi: true}));
+// Update both documents in the collection, such that the events are likely to have the same
+// clusterTime. We update twice to ensure that the PBRT for both shards moves past the first two
+// updates.
+assert.commandWorked(mongosColl.update({}, {$set: {updated: 1}}, {multi: true}));
+assert.commandWorked(mongosColl.update({}, {$set: {updatedAgain: 1}}, {multi: true}));
- // Retrieve the first two events and confirm that they are in order with non-descending
- // clusterTime. Unfortunately we cannot guarantee that clusterTime will be identical, since it
- // is based on each shard's local value and there are operations beyond noop write that can
- // bump the oplog timestamp. We expect however that they will be identical for most test runs,
- // so there is value in testing.
- let clusterTime = null, updateEvent = null;
- for (let x = 0; x < 2; ++x) {
- assert.soon(() => csCursor.hasNext());
- updateEvent = csCursor.next();
- clusterTime = (clusterTime || updateEvent.clusterTime);
- assert.gte(updateEvent.clusterTime, clusterTime);
- assert.eq(updateEvent.updateDescription.updatedFields.updated, 1);
- }
+// Retrieve the first two events and confirm that they are in order with non-descending
+// clusterTime. Unfortunately we cannot guarantee that clusterTime will be identical, since it
+// is based on each shard's local value and there are operations beyond noop write that can
+// bump the oplog timestamp. We expect however that they will be identical for most test runs,
+// so there is value in testing.
+let clusterTime = null, updateEvent = null;
+for (let x = 0; x < 2; ++x) {
assert.soon(() => csCursor.hasNext());
+ updateEvent = csCursor.next();
+ clusterTime = (clusterTime || updateEvent.clusterTime);
+ assert.gte(updateEvent.clusterTime, clusterTime);
+ assert.eq(updateEvent.updateDescription.updatedFields.updated, 1);
+}
+assert.soon(() => csCursor.hasNext());
- // Update both documents again, so that we will have something to observe after resuming.
- assert.commandWorked(mongosColl.update({}, {$set: {updatedYetAgain: 1}}, {multi: true}));
+// Update both documents again, so that we will have something to observe after resuming.
+assert.commandWorked(mongosColl.update({}, {$set: {updatedYetAgain: 1}}, {multi: true}));
- // Resume from the second update, and confirm that we only see events starting with the third
- // and fourth updates. We use batchSize:1 to induce mongoD to send each individual event to the
- // mongoS when resuming, rather than scanning all the way to the most recent point in its oplog.
- csCursor = mongosColl.watch([], {resumeAfter: updateEvent._id, cursor: {batchSize: 1}});
- clusterTime = updateEvent = null;
- for (let x = 0; x < 2; ++x) {
- assert.soon(() => csCursor.hasNext());
- updateEvent = csCursor.next();
- clusterTime = (clusterTime || updateEvent.clusterTime);
- assert.gte(updateEvent.clusterTime, clusterTime);
- assert.eq(updateEvent.updateDescription.updatedFields.updatedAgain, 1);
- }
+// Resume from the second update, and confirm that we only see events starting with the third
+// and fourth updates. We use batchSize:1 to induce mongoD to send each individual event to the
+// mongoS when resuming, rather than scanning all the way to the most recent point in its oplog.
+csCursor = mongosColl.watch([], {resumeAfter: updateEvent._id, cursor: {batchSize: 1}});
+clusterTime = updateEvent = null;
+for (let x = 0; x < 2; ++x) {
assert.soon(() => csCursor.hasNext());
+ updateEvent = csCursor.next();
+ clusterTime = (clusterTime || updateEvent.clusterTime);
+ assert.gte(updateEvent.clusterTime, clusterTime);
+ assert.eq(updateEvent.updateDescription.updatedFields.updatedAgain, 1);
+}
+assert.soon(() => csCursor.hasNext());
- st.stop();
+st.stop();
})();
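The resume step above relies on the fact that each change event's _id field is its resume token. A minimal sketch of the save-and-resume pattern in isolation, with an illustrative collection name:

    let cursor = coll.watch();
    assert.soon(() => cursor.hasNext());
    const savedToken = cursor.next()._id;                 // the event's _id is its resume token
    cursor.close();
    cursor = coll.watch([], {resumeAfter: savedToken});   // continue from just after that event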
diff --git a/jstests/noPassthrough/change_streams_resume_same_clustertime_different_uuid.js b/jstests/noPassthrough/change_streams_resume_same_clustertime_different_uuid.js
index 6ac410870a9..e8cdf1dc722 100644
--- a/jstests/noPassthrough/change_streams_resume_same_clustertime_different_uuid.js
+++ b/jstests/noPassthrough/change_streams_resume_same_clustertime_different_uuid.js
@@ -5,94 +5,94 @@
* @tags: [requires_sharding, uses_change_streams]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/fixture_helpers.js"); // For runCommandOnEachPrimary.
+load("jstests/libs/fixture_helpers.js"); // For runCommandOnEachPrimary.
- // Asserts that the expected operation type and documentKey are found on the change stream
- // cursor. Returns the change stream document.
- function assertWriteVisible({cursor, opType, docKey}) {
- assert.soon(() => cursor.hasNext());
- const changeDoc = cursor.next();
- assert.eq(opType, changeDoc.operationType, changeDoc);
- assert.eq(docKey, changeDoc.documentKey, changeDoc);
- return changeDoc;
- }
+// Asserts that the expected operation type and documentKey are found on the change stream
+// cursor. Returns the change stream document.
+function assertWriteVisible({cursor, opType, docKey}) {
+ assert.soon(() => cursor.hasNext());
+ const changeDoc = cursor.next();
+ assert.eq(opType, changeDoc.operationType, changeDoc);
+ assert.eq(docKey, changeDoc.documentKey, changeDoc);
+ return changeDoc;
+}
- // Create a new cluster with 2 shards. Disable periodic no-ops to ensure that we have control
- // over the ordering of events across the cluster.
- const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 1, setParameter: {writePeriodicNoops: false, periodicNoopIntervalSecs: 1}}
- });
+// Create a new cluster with 2 shards. Disable periodic no-ops to ensure that we have control
+// over the ordering of events across the cluster.
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: false, periodicNoopIntervalSecs: 1}}
+});
- // Create two databases. We will place one of these on each shard.
- const mongosDB0 = st.s.getDB(`${jsTestName()}_0`);
- const mongosDB1 = st.s.getDB(`${jsTestName()}_1`);
- const adminDB = st.s.getDB("admin");
+// Create two databases. We will place one of these on each shard.
+const mongosDB0 = st.s.getDB(`${jsTestName()}_0`);
+const mongosDB1 = st.s.getDB(`${jsTestName()}_1`);
+const adminDB = st.s.getDB("admin");
- // Enable sharding on mongosDB0 and ensure its primary is shard0.
- assert.commandWorked(mongosDB0.adminCommand({enableSharding: mongosDB0.getName()}));
- st.ensurePrimaryShard(mongosDB0.getName(), st.rs0.getURL());
+// Enable sharding on mongosDB0 and ensure its primary is shard0.
+assert.commandWorked(mongosDB0.adminCommand({enableSharding: mongosDB0.getName()}));
+st.ensurePrimaryShard(mongosDB0.getName(), st.rs0.getURL());
- // Enable sharding on mongosDB1 and ensure its primary is shard1.
- assert.commandWorked(mongosDB1.adminCommand({enableSharding: mongosDB1.getName()}));
- st.ensurePrimaryShard(mongosDB1.getName(), st.rs1.getURL());
+// Enable sharding on mongosDB1 and ensure its primary is shard1.
+assert.commandWorked(mongosDB1.adminCommand({enableSharding: mongosDB1.getName()}));
+st.ensurePrimaryShard(mongosDB1.getName(), st.rs1.getURL());
- // Open a connection to a different collection on each shard. We use direct connections to
- // ensure that the oplog timestamps across the shards overlap.
- const coll0 = st.rs0.getPrimary().getCollection(`${mongosDB0.getName()}.test`);
- const coll1 = st.rs1.getPrimary().getCollection(`${mongosDB1.getName()}.test`);
+// Open a connection to a different collection on each shard. We use direct connections to
+// ensure that the oplog timestamps across the shards overlap.
+const coll0 = st.rs0.getPrimary().getCollection(`${mongosDB0.getName()}.test`);
+const coll1 = st.rs1.getPrimary().getCollection(`${mongosDB1.getName()}.test`);
- // Open a change stream on the test cluster. We will capture events in 'changeList'.
- const changeStreamCursor = adminDB.aggregate([{$changeStream: {allChangesForCluster: true}}]);
- const changeList = [];
+// Open a change stream on the test cluster. We will capture events in 'changeList'.
+const changeStreamCursor = adminDB.aggregate([{$changeStream: {allChangesForCluster: true}}]);
+const changeList = [];
- // Insert ten documents on each shard, alternating between the two collections.
- for (let i = 0; i < 20; ++i) {
- const coll = (i % 2 ? coll1 : coll0);
- assert.commandWorked(coll.insert({shard: (i % 2)}));
- }
+// Insert ten documents on each shard, alternating between the two collections.
+for (let i = 0; i < 20; ++i) {
+ const coll = (i % 2 ? coll1 : coll0);
+ assert.commandWorked(coll.insert({shard: (i % 2)}));
+}
- // Verify that each shard now has ten total documents present in the associated collection.
- assert.eq(st.rs0.getPrimary().getCollection(coll0.getFullName()).count(), 10);
- assert.eq(st.rs1.getPrimary().getCollection(coll1.getFullName()).count(), 10);
+// Verify that each shard now has ten total documents present in the associated collection.
+assert.eq(st.rs0.getPrimary().getCollection(coll0.getFullName()).count(), 10);
+assert.eq(st.rs1.getPrimary().getCollection(coll1.getFullName()).count(), 10);
- // Re-enable 'writePeriodicNoops' to ensure that all change stream events are returned.
- FixtureHelpers.runCommandOnEachPrimary(
- {db: adminDB, cmdObj: {setParameter: 1, writePeriodicNoops: true}});
+// Re-enable 'writePeriodicNoops' to ensure that all change stream events are returned.
+FixtureHelpers.runCommandOnEachPrimary(
+ {db: adminDB, cmdObj: {setParameter: 1, writePeriodicNoops: true}});
- // Read the stream of events, capture them in 'changeList', and confirm that all events occurred
- // at or later than the clusterTime of the first event. Unfortunately, we cannot guarantee that
- // corresponding events occurred at the same clusterTime on both shards; we expect, however,
- // that this will be true in the vast majority of runs, and so there is value in testing.
- for (let i = 0; i < 20; ++i) {
- assert.soon(() => changeStreamCursor.hasNext());
- changeList.push(changeStreamCursor.next());
- }
- const clusterTime = changeList[0].clusterTime;
- for (let event of changeList) {
- assert.gte(event.clusterTime, clusterTime);
- }
+// Read the stream of events, capture them in 'changeList', and confirm that all events occurred
+// at or later than the clusterTime of the first event. Unfortunately, we cannot guarantee that
+// corresponding events occurred at the same clusterTime on both shards; we expect, however,
+// that this will be true in the vast majority of runs, and so there is value in testing.
+for (let i = 0; i < 20; ++i) {
+ assert.soon(() => changeStreamCursor.hasNext());
+ changeList.push(changeStreamCursor.next());
+}
+const clusterTime = changeList[0].clusterTime;
+for (let event of changeList) {
+ assert.gte(event.clusterTime, clusterTime);
+}
- // Test that resuming from each event returns the expected set of subsequent documents.
- for (let i = 0; i < changeList.length; ++i) {
- const resumeCursor = adminDB.aggregate(
- [{$changeStream: {allChangesForCluster: true, resumeAfter: changeList[i]._id}}]);
+// Test that resuming from each event returns the expected set of subsequent documents.
+for (let i = 0; i < changeList.length; ++i) {
+ const resumeCursor = adminDB.aggregate(
+ [{$changeStream: {allChangesForCluster: true, resumeAfter: changeList[i]._id}}]);
- // Confirm that the first event in the resumed stream matches the next event recorded in
- // 'changeList' from the original stream. The order of the events should be stable across
- // resumes from any point.
- for (let x = (i + 1); x < changeList.length; ++x) {
- const expectedChangeDoc = changeList[x];
- assertWriteVisible({
- cursor: resumeCursor,
- opType: expectedChangeDoc.operationType,
- docKey: expectedChangeDoc.documentKey
- });
- }
- resumeCursor.close();
+ // Confirm that the first event in the resumed stream matches the next event recorded in
+ // 'changeList' from the original stream. The order of the events should be stable across
+ // resumes from any point.
+ for (let x = (i + 1); x < changeList.length; ++x) {
+ const expectedChangeDoc = changeList[x];
+ assertWriteVisible({
+ cursor: resumeCursor,
+ opType: expectedChangeDoc.operationType,
+ docKey: expectedChangeDoc.documentKey
+ });
}
+ resumeCursor.close();
+}
- st.stop();
+st.stop();
})();
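Cluster-wide streams like the one above are opened against the admin database and resumed in the same way as collection-level streams. A minimal sketch of opening and then resuming such a stream, assuming adminDB is a connection to the admin database and savedEvent is a previously captured change document:

    const clusterCursor = adminDB.aggregate([{$changeStream: {allChangesForCluster: true}}]);
    // ... consume events from clusterCursor, keeping one of them as savedEvent ...
    const resumedCursor = adminDB.aggregate(
        [{$changeStream: {allChangesForCluster: true, resumeAfter: savedEvent._id}}]);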
diff --git a/jstests/noPassthrough/change_streams_resume_token_applyops_overlap.js b/jstests/noPassthrough/change_streams_resume_token_applyops_overlap.js
index c5a27b57e63..0509ff2b3cd 100644
--- a/jstests/noPassthrough/change_streams_resume_token_applyops_overlap.js
+++ b/jstests/noPassthrough/change_streams_resume_token_applyops_overlap.js
@@ -5,94 +5,94 @@
* @tags: [requires_sharding, uses_multi_shard_transaction, uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- // Asserts that the expected operation type and documentKey are found on the change stream
- // cursor. Returns the change stream document.
- function assertWriteVisible({cursor, opType, docKey}) {
- assert.soon(() => cursor.hasNext());
- const changeDoc = cursor.next();
- assert.eq(opType, changeDoc.operationType, changeDoc);
- assert.eq(docKey, changeDoc.documentKey, changeDoc);
- return changeDoc;
- }
+// Asserts that the expected operation type and documentKey are found on the change stream
+// cursor. Returns the change stream document.
+function assertWriteVisible({cursor, opType, docKey}) {
+ assert.soon(() => cursor.hasNext());
+ const changeDoc = cursor.next();
+ assert.eq(opType, changeDoc.operationType, changeDoc);
+ assert.eq(docKey, changeDoc.documentKey, changeDoc);
+ return changeDoc;
+}
- // Create a new cluster with 2 shards. Enable 1-second period no-ops to ensure that all relevant
- // events eventually become available.
- const st = new ShardingTest({
- shards: 2,
- rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
- });
+// Create a new cluster with 2 shards. Enable 1-second periodic no-ops to ensure that all relevant
+// events eventually become available.
+const st = new ShardingTest({
+ shards: 2,
+ rs: {nodes: 1, setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}}
+});
- const mongosDB = st.s.getDB(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = st.s.getDB(jsTestName());
+const mongosColl = mongosDB.test;
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
- // Shard on {shard:1}, split at {shard:1}, and move the upper chunk to shard1.
- st.shardColl(mongosColl, {shard: 1}, {shard: 1}, {shard: 1}, mongosDB.getName(), true);
+// Shard on {shard:1}, split at {shard:1}, and move the upper chunk to shard1.
+st.shardColl(mongosColl, {shard: 1}, {shard: 1}, {shard: 1}, mongosDB.getName(), true);
- // Seed each shard with one document.
- assert.commandWorked(
- mongosColl.insert([{shard: 0, _id: "initial_doc"}, {shard: 1, _id: "initial doc"}]));
+// Seed each shard with one document.
+assert.commandWorked(
+ mongosColl.insert([{shard: 0, _id: "initial_doc"}, {shard: 1, _id: "initial doc"}]));
- // Start a transaction which will be used to write documents across both shards.
- const session = mongosDB.getMongo().startSession();
- const sessionDB = session.getDatabase(mongosDB.getName());
- const sessionColl = sessionDB[mongosColl.getName()];
- session.startTransaction({readConcern: {level: "majority"}});
+// Start a transaction which will be used to write documents across both shards.
+const session = mongosDB.getMongo().startSession();
+const sessionDB = session.getDatabase(mongosDB.getName());
+const sessionColl = sessionDB[mongosColl.getName()];
+session.startTransaction({readConcern: {level: "majority"}});
- // Open a change stream on the test collection. We will capture events in 'changeList'.
- const changeStreamCursor = mongosColl.watch();
- const changeList = [];
+// Open a change stream on the test collection. We will capture events in 'changeList'.
+const changeStreamCursor = mongosColl.watch();
+const changeList = [];
- // Insert four documents on each shard under the transaction.
- assert.commandWorked(
- sessionColl.insert([{shard: 0, _id: "txn1-doc-0"}, {shard: 1, _id: "txn1-doc-1"}]));
- assert.commandWorked(
- sessionColl.insert([{shard: 0, _id: "txn1-doc-2"}, {shard: 1, _id: "txn1-doc-3"}]));
- assert.commandWorked(
- sessionColl.insert([{shard: 0, _id: "txn1-doc-4"}, {shard: 1, _id: "txn1-doc-5"}]));
- assert.commandWorked(
- sessionColl.insert([{shard: 0, _id: "txn1-doc-6"}, {shard: 1, _id: "txn1-doc-7"}]));
+// Insert four documents on each shard under the transaction.
+assert.commandWorked(
+ sessionColl.insert([{shard: 0, _id: "txn1-doc-0"}, {shard: 1, _id: "txn1-doc-1"}]));
+assert.commandWorked(
+ sessionColl.insert([{shard: 0, _id: "txn1-doc-2"}, {shard: 1, _id: "txn1-doc-3"}]));
+assert.commandWorked(
+ sessionColl.insert([{shard: 0, _id: "txn1-doc-4"}, {shard: 1, _id: "txn1-doc-5"}]));
+assert.commandWorked(
+ sessionColl.insert([{shard: 0, _id: "txn1-doc-6"}, {shard: 1, _id: "txn1-doc-7"}]));
- // Commit the transaction.
- assert.commandWorked(session.commitTransaction_forTesting());
+// Commit the transaction.
+assert.commandWorked(session.commitTransaction_forTesting());
- // Read the stream of events, capture them in 'changeList', and confirm that all events occurred
- // at or later than the clusterTime of the first event. Unfortunately, we cannot guarantee that
- // all events occurred at the same clusterTime on both shards, even in the case where all events
- // occur within a single transaction. We expect, however, that this will be true in the vast
- // majority of test runs, and so there is value in retaining this test.
- for (let i = 0; i < 8; ++i) {
- assert.soon(() => changeStreamCursor.hasNext());
- changeList.push(changeStreamCursor.next());
- }
- const clusterTime = changeList[0].clusterTime;
- for (let event of changeList) {
- assert.gte(event.clusterTime, clusterTime);
- }
+// Read the stream of events, capture them in 'changeList', and confirm that all events occurred
+// at or later than the clusterTime of the first event. Unfortunately, we cannot guarantee that
+// all events occurred at the same clusterTime on both shards, even in the case where all events
+// occur within a single transaction. We expect, however, that this will be true in the vast
+// majority of test runs, and so there is value in retaining this test.
+for (let i = 0; i < 8; ++i) {
+ assert.soon(() => changeStreamCursor.hasNext());
+ changeList.push(changeStreamCursor.next());
+}
+const clusterTime = changeList[0].clusterTime;
+for (let event of changeList) {
+ assert.gte(event.clusterTime, clusterTime);
+}
- // Test that resuming from each event returns the expected set of subsequent documents.
- for (let i = 0; i < changeList.length; ++i) {
- const resumeCursor = mongosColl.watch([], {startAfter: changeList[i]._id});
+// Test that resuming from each event returns the expected set of subsequent documents.
+for (let i = 0; i < changeList.length; ++i) {
+ const resumeCursor = mongosColl.watch([], {startAfter: changeList[i]._id});
- // Confirm that the first event in the resumed stream matches the next event recorded in
- // 'changeList' from the original stream. The order of the events should be stable across
- // resumes from any point.
- for (let x = (i + 1); x < changeList.length; ++x) {
- const expectedChangeDoc = changeList[x];
- assertWriteVisible({
- cursor: resumeCursor,
- opType: expectedChangeDoc.operationType,
- docKey: expectedChangeDoc.documentKey
- });
- }
- assert(!resumeCursor.hasNext(), () => `Unexpected event: ${tojson(resumeCursor.next())}`);
- resumeCursor.close();
+ // Confirm that the first event in the resumed stream matches the next event recorded in
+ // 'changeList' from the original stream. The order of the events should be stable across
+ // resumes from any point.
+ for (let x = (i + 1); x < changeList.length; ++x) {
+ const expectedChangeDoc = changeList[x];
+ assertWriteVisible({
+ cursor: resumeCursor,
+ opType: expectedChangeDoc.operationType,
+ docKey: expectedChangeDoc.documentKey
+ });
}
+ assert(!resumeCursor.hasNext(), () => `Unexpected event: ${tojson(resumeCursor.next())}`);
+ resumeCursor.close();
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/change_streams_shell_helper_resume_token.js b/jstests/noPassthrough/change_streams_shell_helper_resume_token.js
index 4e6e42c6406..0e62c649d00 100644
--- a/jstests/noPassthrough/change_streams_shell_helper_resume_token.js
+++ b/jstests/noPassthrough/change_streams_shell_helper_resume_token.js
@@ -5,95 +5,95 @@
* @tags: [requires_journaling]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- // Create a new single-node replica set, and ensure that it can support $changeStream.
- const rst = new ReplSetTest({nodes: 1});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
+// Create a new single-node replica set, and ensure that it can support $changeStream.
+const rst = new ReplSetTest({nodes: 1});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
+rst.initiate();
- const db = rst.getPrimary().getDB(jsTestName());
- const collName = "change_stream_shell_helper_resume_token";
- const csCollection = assertDropAndRecreateCollection(db, collName);
- const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
+const db = rst.getPrimary().getDB(jsTestName());
+const collName = "change_stream_shell_helper_resume_token";
+const csCollection = assertDropAndRecreateCollection(db, collName);
+const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
- const batchSize = 5;
- let docId = 0;
+const batchSize = 5;
+let docId = 0;
- // Test that getResumeToken() returns the postBatchResumeToken when an empty batch is received.
- const csCursor = csCollection.watch([], {cursor: {batchSize: batchSize}});
- assert(!csCursor.hasNext());
- let curResumeToken = csCursor.getResumeToken();
- assert.neq(undefined, curResumeToken);
+// Test that getResumeToken() returns the postBatchResumeToken when an empty batch is received.
+const csCursor = csCollection.watch([], {cursor: {batchSize: batchSize}});
+assert(!csCursor.hasNext());
+let curResumeToken = csCursor.getResumeToken();
+assert.neq(undefined, curResumeToken);
- // Test that advancing the oplog time updates the postBatchResumeToken, even with no results.
- assert.commandWorked(otherCollection.insert({}));
- let prevResumeToken = curResumeToken;
- assert.soon(() => {
- assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
- prevResumeToken = curResumeToken;
- curResumeToken = csCursor.getResumeToken();
- assert.neq(undefined, curResumeToken);
- return bsonWoCompare(curResumeToken, prevResumeToken) > 0;
- });
+// Test that advancing the oplog time updates the postBatchResumeToken, even with no results.
+assert.commandWorked(otherCollection.insert({}));
+let prevResumeToken = curResumeToken;
+assert.soon(() => {
+ assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
+ prevResumeToken = curResumeToken;
+ curResumeToken = csCursor.getResumeToken();
+ assert.neq(undefined, curResumeToken);
+ return bsonWoCompare(curResumeToken, prevResumeToken) > 0;
+});
- // Insert 9 documents into the collection, followed by a write to the unrelated collection.
- for (let i = 0; i < 9; ++i) {
- assert.commandWorked(csCollection.insert({_id: ++docId}));
- }
- assert.commandWorked(otherCollection.insert({}));
+// Insert 9 documents into the collection, followed by a write to the unrelated collection.
+for (let i = 0; i < 9; ++i) {
+ assert.commandWorked(csCollection.insert({_id: ++docId}));
+}
+assert.commandWorked(otherCollection.insert({}));
- // Retrieve the first batch of events from the cursor.
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+// Retrieve the first batch of events from the cursor.
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- // We have not yet iterated any of the events. Verify that the resume token is unchanged.
- assert.docEq(curResumeToken, csCursor.getResumeToken());
+// We have not yet iterated any of the events. Verify that the resume token is unchanged.
+assert.docEq(curResumeToken, csCursor.getResumeToken());
- // For each event in the first batch, the resume token should match the document's _id.
- let currentDoc = null;
- while (csCursor.objsLeftInBatch()) {
- currentDoc = csCursor.next();
- prevResumeToken = curResumeToken;
- curResumeToken = csCursor.getResumeToken();
- assert.docEq(curResumeToken, currentDoc._id);
- assert.gt(bsonWoCompare(curResumeToken, prevResumeToken), 0);
- }
+// For each event in the first batch, the resume token should match the document's _id.
+let currentDoc = null;
+while (csCursor.objsLeftInBatch()) {
+ currentDoc = csCursor.next();
+ prevResumeToken = curResumeToken;
+ curResumeToken = csCursor.getResumeToken();
+ assert.docEq(curResumeToken, currentDoc._id);
+ assert.gt(bsonWoCompare(curResumeToken, prevResumeToken), 0);
+}
- // Retrieve the second batch of events from the cursor.
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+// Retrieve the second batch of events from the cursor.
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- // We haven't pulled any events out of the cursor yet, so the resumeToken should be unchanged.
- assert.docEq(curResumeToken, csCursor.getResumeToken());
+// We haven't pulled any events out of the cursor yet, so the resumeToken should be unchanged.
+assert.docEq(curResumeToken, csCursor.getResumeToken());
- // For all but the final event, the resume token should match the document's _id.
- while ((currentDoc = csCursor.next()).fullDocument._id < docId) {
- assert.soon(() => csCursor.hasNext());
- prevResumeToken = curResumeToken;
- curResumeToken = csCursor.getResumeToken();
- assert.docEq(curResumeToken, currentDoc._id);
- assert.gt(bsonWoCompare(curResumeToken, prevResumeToken), 0);
- }
- // When we reach here, 'currentDoc' is the final document in the batch, but we have not yet
- // updated the resume token. Assert that this resume token sorts before currentDoc's.
+// For all but the final event, the resume token should match the document's _id.
+while ((currentDoc = csCursor.next()).fullDocument._id < docId) {
+ assert.soon(() => csCursor.hasNext());
prevResumeToken = curResumeToken;
- assert.gt(bsonWoCompare(currentDoc._id, prevResumeToken), 0);
+ curResumeToken = csCursor.getResumeToken();
+ assert.docEq(curResumeToken, currentDoc._id);
+ assert.gt(bsonWoCompare(curResumeToken, prevResumeToken), 0);
+}
+// When we reach here, 'currentDoc' is the final document in the batch, but we have not yet
+// updated the resume token. Assert that this resume token sorts before currentDoc's.
+prevResumeToken = curResumeToken;
+assert.gt(bsonWoCompare(currentDoc._id, prevResumeToken), 0);
- // After we have pulled the final document out of the cursor, the resume token should be the
- // postBatchResumeToken rather than the document's _id. Because we inserted an item into the
- // unrelated collection to push the oplog past the final event returned by the change stream,
- // this will be strictly greater than the final document's _id.
- assert.soon(() => {
- curResumeToken = csCursor.getResumeToken();
- assert(!csCursor.hasNext(), () => tojson(csCursor.next()));
- return bsonWoCompare(curResumeToken, currentDoc._id) > 0;
- });
+// After we have pulled the final document out of the cursor, the resume token should be the
+// postBatchResumeToken rather than the document's _id. Because we inserted an item into the
+// unrelated collection to push the oplog past the final event returned by the change stream,
+// this will be strictly greater than the final document's _id.
+assert.soon(() => {
+ curResumeToken = csCursor.getResumeToken();
+ assert(!csCursor.hasNext(), () => tojson(csCursor.next()));
+ return bsonWoCompare(curResumeToken, currentDoc._id) > 0;
+});
- rst.stopSet();
+rst.stopSet();
}());
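A minimal shell sketch of the resume-token pattern exercised above, assuming a hypothetical test.resume_demo collection on a deployment that supports change streams; the collection name and inserted document are illustrative only. It captures the token reported by getResumeToken() after consuming an event and then opens a new stream from that point with startAfter, mirroring the shell helpers the file above tests.

    const demoColl = db.getSiblingDB("test").resume_demo;
    let demoCursor = demoColl.watch([]);
    assert.writeOK(demoColl.insert({_id: 1}));
    assert.soon(() => demoCursor.hasNext());  // Each hasNext() dispatches a getMore until the event arrives.
    demoCursor.next();
    // Mid-batch this is the last-returned event's _id; once the batch is exhausted it advances
    // to the postBatchResumeToken, as the assertions above demonstrate.
    const savedToken = demoCursor.getResumeToken();
    demoCursor.close();
    // Later, resume the stream from immediately after the saved point.
    demoCursor = demoColl.watch([], {startAfter: savedToken});
    demoCursor.close();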
diff --git a/jstests/noPassthrough/change_streams_update_lookup_collation.js b/jstests/noPassthrough/change_streams_update_lookup_collation.js
index 97c7e4013a5..996ce0e2c98 100644
--- a/jstests/noPassthrough/change_streams_update_lookup_collation.js
+++ b/jstests/noPassthrough/change_streams_update_lookup_collation.js
@@ -4,98 +4,99 @@
// Collation is only supported with the find command, not with op query.
// @tags: [requires_find_command, uses_change_streams]
(function() {
- "use strict";
-
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
-
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const db = rst.getPrimary().getDB("test");
- const coll = db[jsTestName()];
- const caseInsensitive = {locale: "en_US", strength: 2};
- assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitive}));
-
- // Insert some documents that have similar _ids, but differ by case and diacritics. These _ids
- // would all match the collation on the strengthOneChangeStream, but should not be confused
- // during the update lookup using the strength 2 collection default collation.
- assert.writeOK(coll.insert({_id: "abc", x: "abc"}));
- assert.writeOK(coll.insert({_id: "abç", x: "ABC"}));
- assert.writeOK(coll.insert({_id: "åbC", x: "AbÇ"}));
-
- const changeStreamDefaultCollation = coll.aggregate(
- [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}],
- {collation: caseInsensitive});
-
- // Strength one will consider "ç" equal to "c" and "C".
- const strengthOneCollation = {locale: "en_US", strength: 1};
- const strengthOneChangeStream = coll.aggregate(
- [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}],
- {collation: strengthOneCollation});
-
- assert.writeOK(coll.update({_id: "abc"}, {$set: {updated: true}}));
-
- // Track the number of _id index usages to prove that the update lookup uses the _id index (and
- // therefore is using the correct collation for the lookup).
- function numIdIndexUsages() {
- return coll.aggregate([{$indexStats: {}}, {$match: {name: "_id_"}}])
- .toArray()[0]
- .accesses.ops;
- }
- const idIndexUsagesBeforeIteration = numIdIndexUsages();
-
- // Both cursors should produce a document describing this update, since the "x" value of the
- // first document will match both filters.
- assert.soon(() => changeStreamDefaultCollation.hasNext());
- assert.docEq(changeStreamDefaultCollation.next().fullDocument,
- {_id: "abc", x: "abc", updated: true});
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 1);
- assert.docEq(strengthOneChangeStream.next().fullDocument,
- {_id: "abc", x: "abc", updated: true});
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 2);
-
- assert.writeOK(coll.update({_id: "abç"}, {$set: {updated: true}}));
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 3);
-
- // Again, both cursors should produce a document describing this update.
- assert.soon(() => changeStreamDefaultCollation.hasNext());
- assert.docEq(changeStreamDefaultCollation.next().fullDocument,
- {_id: "abç", x: "ABC", updated: true});
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 4);
- assert.docEq(strengthOneChangeStream.next().fullDocument,
- {_id: "abç", x: "ABC", updated: true});
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 5);
-
- assert.writeOK(coll.update({_id: "åbC"}, {$set: {updated: true}}));
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 6);
-
- // Both $changeStream stages will see this update and both will look up the full document using
- // the foreign collection's default collation. However, the changeStreamDefaultCollation's
- // subsequent $match stage will reject the document because it does not consider "AbÇ" equal to
- // "abc". Only the strengthOneChangeStream will output the final document.
- assert.soon(() => strengthOneChangeStream.hasNext());
- assert.docEq(strengthOneChangeStream.next().fullDocument,
- {_id: "åbC", x: "AbÇ", updated: true});
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 7);
- assert(!changeStreamDefaultCollation.hasNext());
- assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 8);
-
- changeStreamDefaultCollation.close();
- strengthOneChangeStream.close();
- rst.stopSet();
+"use strict";
+
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const db = rst.getPrimary().getDB("test");
+const coll = db[jsTestName()];
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+assert.commandWorked(db.createCollection(coll.getName(), {collation: caseInsensitive}));
+
+// Insert some documents that have similar _ids, but differ by case and diacritics. These _ids
+// would all match the collation on the strengthOneChangeStream, but should not be confused
+// during the update lookup using the strength 2 collection default collation.
+assert.writeOK(coll.insert({_id: "abc", x: "abc"}));
+assert.writeOK(coll.insert({_id: "abç", x: "ABC"}));
+assert.writeOK(coll.insert({_id: "åbC", x: "AbÇ"}));
+
+const changeStreamDefaultCollation = coll.aggregate(
+ [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}],
+ {collation: caseInsensitive});
+
+// Strength one will consider "ç" equal to "c" and "C".
+const strengthOneCollation = {
+ locale: "en_US",
+ strength: 1
+};
+const strengthOneChangeStream = coll.aggregate(
+ [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {"fullDocument.x": "abc"}}],
+ {collation: strengthOneCollation});
+
+assert.writeOK(coll.update({_id: "abc"}, {$set: {updated: true}}));
+
+// Track the number of _id index usages to prove that the update lookup uses the _id index (and
+// therefore is using the correct collation for the lookup).
+function numIdIndexUsages() {
+ return coll.aggregate([{$indexStats: {}}, {$match: {name: "_id_"}}]).toArray()[0].accesses.ops;
+}
+const idIndexUsagesBeforeIteration = numIdIndexUsages();
+
+// Both cursors should produce a document describing this update, since the "x" value of the
+// first document will match both filters.
+assert.soon(() => changeStreamDefaultCollation.hasNext());
+assert.docEq(changeStreamDefaultCollation.next().fullDocument,
+ {_id: "abc", x: "abc", updated: true});
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 1);
+assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "abc", x: "abc", updated: true});
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 2);
+
+assert.writeOK(coll.update({_id: "abç"}, {$set: {updated: true}}));
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 3);
+
+// Again, both cursors should produce a document describing this update.
+assert.soon(() => changeStreamDefaultCollation.hasNext());
+assert.docEq(changeStreamDefaultCollation.next().fullDocument,
+ {_id: "abç", x: "ABC", updated: true});
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 4);
+assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "abç", x: "ABC", updated: true});
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 5);
+
+assert.writeOK(coll.update({_id: "åbC"}, {$set: {updated: true}}));
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 6);
+
+// Both $changeStream stages will see this update and both will look up the full document using
+// the foreign collection's default collation. However, the changeStreamDefaultCollation's
+// subsequent $match stage will reject the document because it does not consider "AbÇ" equal to
+// "abc". Only the strengthOneChangeStream will output the final document.
+assert.soon(() => strengthOneChangeStream.hasNext());
+assert.docEq(strengthOneChangeStream.next().fullDocument, {_id: "åbC", x: "AbÇ", updated: true});
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 7);
+assert(!changeStreamDefaultCollation.hasNext());
+assert.eq(numIdIndexUsages(), idIndexUsagesBeforeIteration + 8);
+
+changeStreamDefaultCollation.close();
+strengthOneChangeStream.close();
+rst.stopSet();
}());
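A minimal sketch of the collation-strength behaviour the test above relies on, using a hypothetical test.collation_demo collection (names and values are illustrative): strength 2 ignores case but still distinguishes diacritics, while strength 1 also folds diacritics, which is why only the strength-1 change stream outputs the final "åbC" update.

    const demo = db.getSiblingDB("test").collation_demo;
    demo.drop();
    assert.writeOK(demo.insert({x: "abç"}));
    // Strength 2 (case-insensitive): "ABÇ" matches, but "abc" does not, since ç differs from c.
    assert.eq(1, demo.find({x: "ABÇ"}).collation({locale: "en_US", strength: 2}).itcount());
    assert.eq(0, demo.find({x: "abc"}).collation({locale: "en_US", strength: 2}).itcount());
    // Strength 1 (case- and diacritic-insensitive): "abc" now matches "abç" as well.
    assert.eq(1, demo.find({x: "abc"}).collation({locale: "en_US", strength: 1}).itcount());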
diff --git a/jstests/noPassthrough/characterize_index_builds_on_restart.js b/jstests/noPassthrough/characterize_index_builds_on_restart.js
index 37cffa27ae9..ed055059bbf 100644
--- a/jstests/noPassthrough/characterize_index_builds_on_restart.js
+++ b/jstests/noPassthrough/characterize_index_builds_on_restart.js
@@ -8,239 +8,237 @@
* @tags: [requires_replication, requires_persistence, requires_majority_read_concern]
*/
(function() {
- 'use strict';
-
- load("jstests/libs/check_log.js");
- load("jstests/replsets/rslib.js");
-
- const dbName = "testDb";
- const collName = "testColl";
-
- const firstIndex = "firstIndex";
- const secondIndex = "secondIndex";
- const thirdIndex = "thirdIndex";
- const fourthIndex = "fourthIndex";
-
- const indexesToBuild = [
- {key: {i: 1}, name: firstIndex, background: true},
- {key: {j: 1}, name: secondIndex, background: true},
- {key: {i: 1, j: 1}, name: thirdIndex, background: true},
- {key: {i: -1, j: 1, k: -1}, name: fourthIndex, background: true},
- ];
-
- function startStandalone() {
- let mongod = MongoRunner.runMongod({cleanData: true});
- let db = mongod.getDB(dbName);
- db.dropDatabase();
- return mongod;
+'use strict';
+
+load("jstests/libs/check_log.js");
+load("jstests/replsets/rslib.js");
+
+const dbName = "testDb";
+const collName = "testColl";
+
+const firstIndex = "firstIndex";
+const secondIndex = "secondIndex";
+const thirdIndex = "thirdIndex";
+const fourthIndex = "fourthIndex";
+
+const indexesToBuild = [
+ {key: {i: 1}, name: firstIndex, background: true},
+ {key: {j: 1}, name: secondIndex, background: true},
+ {key: {i: 1, j: 1}, name: thirdIndex, background: true},
+ {key: {i: -1, j: 1, k: -1}, name: fourthIndex, background: true},
+];
+
+function startStandalone() {
+ let mongod = MongoRunner.runMongod({cleanData: true});
+ let db = mongod.getDB(dbName);
+ db.dropDatabase();
+ return mongod;
+}
+
+function restartStandalone(old) {
+ jsTest.log("Restarting mongod");
+ MongoRunner.stopMongod(old);
+ return MongoRunner.runMongod({restart: true, dbpath: old.dbpath, cleanData: false});
+}
+
+function shutdownStandalone(mongod) {
+ MongoRunner.stopMongod(mongod);
+}
+
+function startReplSet() {
+ let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2, nodeOptions: {syncdelay: 1}});
+ let nodes = replSet.nodeList();
+
+ // We need an arbiter to ensure that the primary doesn't step down when we restart the
+ // secondary
+ replSet.startSet({startClean: true});
+ replSet.initiate(
+ {_id: "indexBuilds", members: [{_id: 0, host: nodes[0]}, {_id: 1, host: nodes[1]}]});
+
+ replSet.getPrimary().getDB(dbName).dropDatabase();
+ return replSet;
+}
+
+function stopReplSet(replSet) {
+ replSet.stopSet();
+}
+
+function addTestDocuments(db) {
+ let size = 100;
+ jsTest.log("Creating " + size + " test documents.");
+ var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i, j: i * i, k: 1});
}
-
- function restartStandalone(old) {
- jsTest.log("Restarting mongod");
- MongoRunner.stopMongod(old);
- return MongoRunner.runMongod({restart: true, dbpath: old.dbpath, cleanData: false});
+ assert.writeOK(bulk.execute());
+}
+
+function startIndexBuildOnSecondaryAndLeaveUnfinished(primaryDB, writeConcern, secondaryDB) {
+ jsTest.log("Starting an index build on the secondary and leaving it unfinished.");
+
+ assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "alwaysOn"}));
+
+ // Do not generate the 'ns' field for index specs on the primary to test the absence of the
+ // field on restart.
+ assert.commandWorked(
+ primaryDB.adminCommand({setParameter: 1, disableIndexSpecNamespaceGeneration: 1}));
+
+ try {
+ let res = assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: collName, indexes: indexesToBuild, writeConcern: {w: writeConcern}}));
+
+ // Wait till all four index builds hang.
+ checkLog.containsWithCount(
+ secondaryDB,
+ "Index build interrupted due to \'leaveIndexBuildUnfinishedForShutdown\' " +
+ "failpoint. Mimicing shutdown error code.",
+ 4);
+
+ // Wait until the secondary has a checkpoint timestamp beyond the index oplog entry. On
+ // restart, replication recovery will not replay the createIndex oplog entries.
+ jsTest.log("Waiting for unfinished index build to be in checkpoint.");
+ assert.soon(() => {
+ let replSetStatus = assert.commandWorked(
+ secondaryDB.getSiblingDB("admin").runCommand({replSetGetStatus: 1}));
+ if (replSetStatus.lastStableCheckpointTimestamp >= res.operationTime)
+ return true;
+ });
+ } finally {
+ assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "off"}));
}
-
- function shutdownStandalone(mongod) {
- MongoRunner.stopMongod(mongod);
+}
+
+function checkForIndexRebuild(mongod, indexName, shouldExist) {
+ let adminDB = mongod.getDB("admin");
+ let collDB = mongod.getDB(dbName);
+ let logs = adminDB.runCommand({getLog: "global"});
+
+ let rebuildIndexLogEntry = false;
+ let dropIndexLogEntry = false;
+
+ /**
+ * The log should contain the following lines if it rebuilds or drops the index:
+ * Rebuilding index. Collection: `collNss` Index: `indexName`
+ * Dropping unfinished index. Collection: `collNss` Index: `indexName`
+ */
+ let rebuildIndexLine =
+ "Rebuilding index. Collection: " + dbName + "." + collName + " Index: " + indexName;
+ let dropIndexLine = "Dropping unfinished index. Collection: " + dbName + "." + collName +
+ " Index: " + indexName;
+ for (let line = 0; line < logs.log.length; line++) {
+ if (logs.log[line].includes(rebuildIndexLine))
+ rebuildIndexLogEntry = true;
+ else if (logs.log[line].includes(dropIndexLine))
+ dropIndexLogEntry = true;
}
- function startReplSet() {
- let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2, nodeOptions: {syncdelay: 1}});
- let nodes = replSet.nodeList();
-
- // We need an arbiter to ensure that the primary doesn't step down when we restart the
- // secondary
- replSet.startSet({startClean: true});
- replSet.initiate(
- {_id: "indexBuilds", members: [{_id: 0, host: nodes[0]}, {_id: 1, host: nodes[1]}]});
+ // Can't be either missing both entries or have both entries for the given index name.
+ assert.neq(rebuildIndexLogEntry, dropIndexLogEntry);
- replSet.getPrimary().getDB(dbName).dropDatabase();
- return replSet;
- }
-
- function stopReplSet(replSet) {
- replSet.stopSet();
- }
+ // Ensure the index either exists or doesn't exist in the collection depending on the result
+ // of the log.
+ let collIndexes = collDB.getCollection(collName).getIndexes();
- function addTestDocuments(db) {
- let size = 100;
- jsTest.log("Creating " + size + " test documents.");
- var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i, j: i * i, k: 1});
+ let foundIndexEntry = false;
+ for (let index = 0; index < collIndexes.length; index++) {
+ assert.eq(true, collIndexes[index].hasOwnProperty('ns'));
+ if (collIndexes[index].name == indexName) {
+ foundIndexEntry = true;
+ break;
}
- assert.writeOK(bulk.execute());
}
- function startIndexBuildOnSecondaryAndLeaveUnfinished(primaryDB, writeConcern, secondaryDB) {
- jsTest.log("Starting an index build on the secondary and leaving it unfinished.");
-
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "alwaysOn"}));
-
- // Do not generate the 'ns' field for index specs on the primary to test the absence of the
- // field on restart.
- assert.commandWorked(
- primaryDB.adminCommand({setParameter: 1, disableIndexSpecNamespaceGeneration: 1}));
-
- try {
- let res = assert.commandWorked(primaryDB.runCommand({
- createIndexes: collName,
- indexes: indexesToBuild,
- writeConcern: {w: writeConcern}
- }));
-
- // Wait till all four index builds hang.
- checkLog.containsWithCount(
- secondaryDB,
- "Index build interrupted due to \'leaveIndexBuildUnfinishedForShutdown\' " +
- "failpoint. Mimicing shutdown error code.",
- 4);
-
- // Wait until the secondary has a checkpoint timestamp beyond the index oplog entry. On
- // restart, replication recovery will not replay the createIndex oplog entries.
- jsTest.log("Waiting for unfinished index build to be in checkpoint.");
- assert.soon(() => {
- let replSetStatus = assert.commandWorked(
- secondaryDB.getSiblingDB("admin").runCommand({replSetGetStatus: 1}));
- if (replSetStatus.lastStableCheckpointTimestamp >= res.operationTime)
- return true;
- });
- } finally {
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "off"}));
- }
+ // If the log claims it rebuilt an unfinished index, the index must exist.
+ assert.eq(rebuildIndexLogEntry, foundIndexEntry);
+
+ // If the log claims it dropped an unfinished index, the index must not exist.
+ assert.eq(dropIndexLogEntry, !foundIndexEntry);
+
+ // Ensure our characterization matches the outcome of the index build.
+ assert.eq(foundIndexEntry, (shouldExist ? true : false));
+
+ if (foundIndexEntry)
+ jsTest.log("Rebuilt unfinished index. Collection: " + dbName + "." + collName +
+ " Index: " + indexName);
+ else
+ jsTest.log("Dropped unfinished index. Collection: " + dbName + "." + collName +
+ " Index: " + indexName);
+}
+
+function standaloneToStandaloneTest() {
+ let mongod = startStandalone();
+ let collDB = mongod.getDB(dbName);
+
+ addTestDocuments(collDB);
+
+ jsTest.log("Starting an index build on a standalone and leaving it unfinished.");
+ assert.commandWorked(collDB.adminCommand(
+ {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "alwaysOn"}));
+ try {
+ assert.commandFailedWithCode(
+ collDB.runCommand({createIndexes: collName, indexes: indexesToBuild}),
+ ErrorCodes.InterruptedAtShutdown);
+ } finally {
+ assert.commandWorked(collDB.adminCommand(
+ {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "off"}));
}
- function checkForIndexRebuild(mongod, indexName, shouldExist) {
- let adminDB = mongod.getDB("admin");
- let collDB = mongod.getDB(dbName);
- let logs = adminDB.runCommand({getLog: "global"});
-
- let rebuildIndexLogEntry = false;
- let dropIndexLogEntry = false;
-
- /** The log should contain the following lines if it rebuilds or drops the index:
- * Rebuilding index. Collection: `collNss` Index: `indexName`
- * Dropping unfinished index. Collection: `collNss` Index: `indexName`
- */
- let rebuildIndexLine =
- "Rebuilding index. Collection: " + dbName + "." + collName + " Index: " + indexName;
- let dropIndexLine = "Dropping unfinished index. Collection: " + dbName + "." + collName +
- " Index: " + indexName;
- for (let line = 0; line < logs.log.length; line++) {
- if (logs.log[line].includes(rebuildIndexLine))
- rebuildIndexLogEntry = true;
- else if (logs.log[line].includes(dropIndexLine))
- dropIndexLogEntry = true;
- }
+ mongod = restartStandalone(mongod);
- // Can't be either missing both entries or have both entries for the given index name.
- assert.neq(rebuildIndexLogEntry, dropIndexLogEntry);
+ checkForIndexRebuild(mongod, firstIndex, /*shouldExist=*/false);
+ checkForIndexRebuild(mongod, secondIndex, /*shouldExist=*/false);
+ checkForIndexRebuild(mongod, thirdIndex, /*shouldExist=*/false);
+ checkForIndexRebuild(mongod, fourthIndex, /*shouldExist=*/false);
- // Ensure the index either exists or doesn't exist in the collection depending on the result
- // of the log.
- let collIndexes = collDB.getCollection(collName).getIndexes();
+ shutdownStandalone(mongod);
+}
- let foundIndexEntry = false;
- for (let index = 0; index < collIndexes.length; index++) {
- assert.eq(true, collIndexes[index].hasOwnProperty('ns'));
- if (collIndexes[index].name == indexName) {
- foundIndexEntry = true;
- break;
- }
- }
+function secondaryToStandaloneTest() {
+ let replSet = startReplSet();
+ let primary = replSet.getPrimary();
+ let secondary = replSet.getSecondary();
- // If the log claims it rebuilt an unfinished index, the index must exist.
- assert.eq(rebuildIndexLogEntry, foundIndexEntry);
+ let primaryDB = primary.getDB(dbName);
+ let secondaryDB = secondary.getDB(dbName);
- // If the log claims it dropped an unfinished index, the index must not exist.
- assert.eq(dropIndexLogEntry, !foundIndexEntry);
+ addTestDocuments(primaryDB);
- // Ensure our characterization matches the outcome of the index build.
- assert.eq(foundIndexEntry, (shouldExist ? true : false));
+ // Make sure the documents get replicated on the secondary.
+ replSet.awaitReplication();
- if (foundIndexEntry)
- jsTest.log("Rebuilt unfinished index. Collection: " + dbName + "." + collName +
- " Index: " + indexName);
- else
- jsTest.log("Dropped unfinished index. Collection: " + dbName + "." + collName +
- " Index: " + indexName);
- }
+ startIndexBuildOnSecondaryAndLeaveUnfinished(primaryDB, /*writeConcern=*/2, secondaryDB);
- function standaloneToStandaloneTest() {
- let mongod = startStandalone();
- let collDB = mongod.getDB(dbName);
+ replSet.stopSet(/*signal=*/null, /*forRestart=*/true);
- addTestDocuments(collDB);
+ let mongod = restartStandalone(secondary);
- jsTest.log("Starting an index build on a standalone and leaving it unfinished.");
- assert.commandWorked(collDB.adminCommand(
- {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "alwaysOn"}));
- try {
- assert.commandFailedWithCode(
- collDB.runCommand({createIndexes: collName, indexes: indexesToBuild}),
- ErrorCodes.InterruptedAtShutdown);
- } finally {
- assert.commandWorked(collDB.adminCommand(
- {configureFailPoint: "leaveIndexBuildUnfinishedForShutdown", mode: "off"}));
- }
-
- mongod = restartStandalone(mongod);
+ checkForIndexRebuild(mongod, firstIndex, /*shouldExist=*/true);
+ checkForIndexRebuild(mongod, secondIndex, /*shouldExist=*/true);
+ checkForIndexRebuild(mongod, thirdIndex, /*shouldExist=*/true);
+ checkForIndexRebuild(mongod, fourthIndex, /*shouldExist=*/true);
- checkForIndexRebuild(mongod, firstIndex, /*shouldExist=*/false);
- checkForIndexRebuild(mongod, secondIndex, /*shouldExist=*/false);
- checkForIndexRebuild(mongod, thirdIndex, /*shouldExist=*/false);
- checkForIndexRebuild(mongod, fourthIndex, /*shouldExist=*/false);
+ shutdownStandalone(mongod);
- shutdownStandalone(mongod);
+ mongod = restartStandalone(primary);
+ let specs = mongod.getDB(dbName).getCollection(collName).getIndexes();
+ assert.eq(specs.length, 5);
+ for (let index = 0; index < specs.length; index++) {
+ assert.eq(true, specs[index].hasOwnProperty('ns'));
}
- function secondaryToStandaloneTest() {
- let replSet = startReplSet();
- let primary = replSet.getPrimary();
- let secondary = replSet.getSecondary();
-
- let primaryDB = primary.getDB(dbName);
- let secondaryDB = secondary.getDB(dbName);
-
- addTestDocuments(primaryDB);
-
- // Make sure the documents get replicated on the secondary.
- replSet.awaitReplication();
-
- startIndexBuildOnSecondaryAndLeaveUnfinished(primaryDB, /*writeConcern=*/2, secondaryDB);
-
- replSet.stopSet(/*signal=*/null, /*forRestart=*/true);
-
- let mongod = restartStandalone(secondary);
-
- checkForIndexRebuild(mongod, firstIndex, /*shouldExist=*/true);
- checkForIndexRebuild(mongod, secondIndex, /*shouldExist=*/true);
- checkForIndexRebuild(mongod, thirdIndex, /*shouldExist=*/true);
- checkForIndexRebuild(mongod, fourthIndex, /*shouldExist=*/true);
-
- shutdownStandalone(mongod);
-
- mongod = restartStandalone(primary);
- let specs = mongod.getDB(dbName).getCollection(collName).getIndexes();
- assert.eq(specs.length, 5);
- for (let index = 0; index < specs.length; index++) {
- assert.eq(true, specs[index].hasOwnProperty('ns'));
- }
-
- shutdownStandalone(mongod);
- }
+ shutdownStandalone(mongod);
+}
- /* Begin tests */
- jsTest.log("Restarting nodes as standalone with unfinished indexes.");
+/* Begin tests */
+jsTest.log("Restarting nodes as standalone with unfinished indexes.");
- // Standalone restarts as standalone
- jsTest.log("Restarting standalone mongod.");
- standaloneToStandaloneTest();
+// Standalone restarts as standalone
+jsTest.log("Restarting standalone mongod.");
+standaloneToStandaloneTest();
- // Replica set node restarts as standalone
- jsTest.log("Restarting replica set node mongod.");
- secondaryToStandaloneTest();
+// Replica set node restarts as standalone
+jsTest.log("Restarting replica set node mongod.");
+secondaryToStandaloneTest();
})();
diff --git a/jstests/noPassthrough/child_op_numyields.js b/jstests/noPassthrough/child_op_numyields.js
index 04c79d308ba..fbc5dc773dc 100644
--- a/jstests/noPassthrough/child_op_numyields.js
+++ b/jstests/noPassthrough/child_op_numyields.js
@@ -3,42 +3,42 @@
* as the latter are popped off the CurOp stack.
*/
(function() {
- "use strict";
-
- // Start a single mongoD using MongoRunner.
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
-
- // Create the test DB and collection.
- const testDB = conn.getDB("currentop_yield");
- const adminDB = conn.getDB("admin");
- const testColl = testDB.test;
-
- // Queries current operations until a single matching operation is found.
- function awaitMatchingCurrentOp(match) {
- let currentOp = null;
- assert.soon(() => {
- currentOp = adminDB.aggregate([{$currentOp: {}}, match]).toArray();
- return (currentOp.length === 1);
- });
- return currentOp[0];
- }
-
- // Executes a bulk remove using the specified 'docsToRemove' array, captures the 'numYields'
- // metrics from each child op, and confirms that the parent op's 'numYields' total is equivalent
- // to the sum of the child ops.
- function runYieldTest(docsToRemove) {
- // Sets parameters such that all operations will yield & the operation hangs on the server
- // when we need to test.
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "alwaysOn"}));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangAfterAllChildRemoveOpsArePopped", mode: "alwaysOn"}));
-
- // Starts parallel shell to run the command that will hang.
- const awaitShell = startParallelShell(`{
+"use strict";
+
+// Start a single mongoD using MongoRunner.
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+
+// Create the test DB and collection.
+const testDB = conn.getDB("currentop_yield");
+const adminDB = conn.getDB("admin");
+const testColl = testDB.test;
+
+// Queries current operations until a single matching operation is found.
+function awaitMatchingCurrentOp(match) {
+ let currentOp = null;
+ assert.soon(() => {
+ currentOp = adminDB.aggregate([{$currentOp: {}}, match]).toArray();
+ return (currentOp.length === 1);
+ });
+ return currentOp[0];
+}
+
+// Executes a bulk remove using the specified 'docsToRemove' array, captures the 'numYields'
+// metrics from each child op, and confirms that the parent op's 'numYields' total is equivalent
+// to the sum of the child ops.
+function runYieldTest(docsToRemove) {
+ // Sets parameters such that all operations will yield & the operation hangs on the server
+ // when we need to test.
+ assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "alwaysOn"}));
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangAfterAllChildRemoveOpsArePopped", mode: "alwaysOn"}));
+
+ // Starts parallel shell to run the command that will hang.
+ const awaitShell = startParallelShell(`{
const testDB = db.getSiblingDB("currentop_yield");
const bulkRemove = testDB.test.initializeOrderedBulkOp();
for(let doc of ${tojsononeline(docsToRemove)}) {
@@ -48,73 +48,73 @@
}`,
testDB.getMongo().port);
- let childOpId = null;
- let childYields = 0;
-
- // Get child operations and sum yields. Each child op encounters two failpoints while
- // running: 'hangBeforeChildRemoveOpFinishes' followed by 'hangBeforeChildRemoveOpIsPopped'.
- // We use these two failpoints as an 'airlock', hanging at the first while we enable the
- // second, then hanging at the second while we enable the first, to ensure that each child
- // op is caught and their individual 'numYields' recorded.
- for (let childCount = 0; childCount < docsToRemove.length; childCount++) {
- // Wait for the child op to hit the first of two failpoints.
- let childCurOp = awaitMatchingCurrentOp(
- {$match: {ns: testColl.getFullName(), msg: "hangBeforeChildRemoveOpFinishes"}});
-
- // Add the child's yield count to the running total, and record the opid.
- assert(childOpId === null || childOpId === childCurOp.opid);
- assert.gt(childCurOp.numYields, 0);
- childYields += childCurOp.numYields;
- childOpId = childCurOp.opid;
-
- // Enable the subsequent 'hangBeforeChildRemoveOpIsPopped' failpoint, just after the
- // child op finishes but before it is popped from the stack.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeChildRemoveOpIsPopped", mode: "alwaysOn"}));
+ let childOpId = null;
+ let childYields = 0;
+
+ // Get child operations and sum yields. Each child op encounters two failpoints while
+ // running: 'hangBeforeChildRemoveOpFinishes' followed by 'hangBeforeChildRemoveOpIsPopped'.
+ // We use these two failpoints as an 'airlock', hanging at the first while we enable the
+ // second, then hanging at the second while we enable the first, to ensure that each child
+ // op is caught and their individual 'numYields' recorded.
+ for (let childCount = 0; childCount < docsToRemove.length; childCount++) {
+ // Wait for the child op to hit the first of two failpoints.
+ let childCurOp = awaitMatchingCurrentOp(
+ {$match: {ns: testColl.getFullName(), msg: "hangBeforeChildRemoveOpFinishes"}});
+
+ // Add the child's yield count to the running total, and record the opid.
+ assert(childOpId === null || childOpId === childCurOp.opid);
+ assert.gt(childCurOp.numYields, 0);
+ childYields += childCurOp.numYields;
+ childOpId = childCurOp.opid;
+
+ // Enable the subsequent 'hangBeforeChildRemoveOpIsPopped' failpoint, just after the
+ // child op finishes but before it is popped from the stack.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangBeforeChildRemoveOpIsPopped", mode: "alwaysOn"}));
- // Let the operation proceed to the 'hangBeforeChildRemoveOpIsPopped' failpoint.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "off"}));
- awaitMatchingCurrentOp(
- {$match: {ns: testColl.getFullName(), msg: "hangBeforeChildRemoveOpIsPopped"}});
-
- // If this is not the final child op, re-enable the 'hangBeforeChildRemoveOpFinishes'
- // failpoint from earlier so that we don't miss the next child.
- if (childCount + 1 < docsToRemove.length) {
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "alwaysOn"}));
- }
+ // Let the operation proceed to the 'hangBeforeChildRemoveOpIsPopped' failpoint.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "off"}));
+ awaitMatchingCurrentOp(
+ {$match: {ns: testColl.getFullName(), msg: "hangBeforeChildRemoveOpIsPopped"}});
- // Finally, allow the operation to continue.
+ // If this is not the final child op, re-enable the 'hangBeforeChildRemoveOpFinishes'
+ // failpoint from earlier so that we don't miss the next child.
+ if (childCount + 1 < docsToRemove.length) {
assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeChildRemoveOpIsPopped", mode: "off"}));
+ {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "alwaysOn"}));
}
- // Wait for the operation to hit the 'hangAfterAllChildRemoveOpsArePopped' failpoint, then
- // take the total number of yields recorded by the parent op.
- const parentCurOp = awaitMatchingCurrentOp(
- {$match: {opid: childOpId, op: "command", msg: "hangAfterAllChildRemoveOpsArePopped"}});
+ // Finally, allow the operation to continue.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangBeforeChildRemoveOpIsPopped", mode: "off"}));
+ }
- // Verify that the parent's yield count equals the sum of the child ops' yields.
- assert.eq(parentCurOp.numYields, childYields);
- assert.eq(parentCurOp.opid, childOpId);
+ // Wait for the operation to hit the 'hangAfterAllChildRemoveOpsArePopped' failpoint, then
+ // take the total number of yields recorded by the parent op.
+ const parentCurOp = awaitMatchingCurrentOp(
+ {$match: {opid: childOpId, op: "command", msg: "hangAfterAllChildRemoveOpsArePopped"}});
- // Allow the parent operation to complete.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangAfterAllChildRemoveOpsArePopped", mode: "off"}));
+ // Verify that the parent's yield count equals the sum of the child ops' yields.
+ assert.eq(parentCurOp.numYields, childYields);
+ assert.eq(parentCurOp.opid, childOpId);
- // Wait for the parallel shell to complete.
- awaitShell();
- }
+ // Allow the parent operation to complete.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangAfterAllChildRemoveOpsArePopped", mode: "off"}));
+
+ // Wait for the parallel shell to complete.
+ awaitShell();
+}
- // Test that a parent remove op inherits the sum of its children's yields for a single remove.
- assert.commandWorked(testDB.test.insert({a: 2}));
- runYieldTest([{a: 2}]);
+// Test that a parent remove op inherits the sum of its children's yields for a single remove.
+assert.commandWorked(testDB.test.insert({a: 2}));
+runYieldTest([{a: 2}]);
- // Test that a parent remove op inherits the sum of its children's yields for multiple removes.
- const docsToTest = [{a: 1}, {a: 2}, {a: 3}, {a: 4}, {a: 5}];
- assert.commandWorked(testDB.test.insert(docsToTest));
- runYieldTest(docsToTest);
+// Test that a parent remove op inherits the sum of its children's yields for multiple removes.
+const docsToTest = [{a: 1}, {a: 2}, {a: 3}, {a: 4}, {a: 5}];
+assert.commandWorked(testDB.test.insert(docsToTest));
+runYieldTest(docsToTest);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
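A minimal sketch of the failpoint-and-$currentOp pattern the test above uses, with the failpoint name taken from that test; on its own it only demonstrates the control flow, since the operation that actually hits the failpoint would be started from a parallel shell as above.

    // Enable the failpoint so that the next matching operation pauses server-side.
    assert.commandWorked(db.adminCommand(
        {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "alwaysOn"}));
    // Poll $currentOp until the paused operation reports the failpoint's message.
    assert.soon(() => {
        return db.getSiblingDB("admin")
                   .aggregate([{$currentOp: {}}, {$match: {msg: "hangBeforeChildRemoveOpFinishes"}}])
                   .itcount() >= 1;
    });
    // Disable the failpoint to let the paused operation proceed.
    assert.commandWorked(db.adminCommand(
        {configureFailPoint: "hangBeforeChildRemoveOpFinishes", mode: "off"}));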
diff --git a/jstests/noPassthrough/client_metadata_log.js b/jstests/noPassthrough/client_metadata_log.js
index f1b90492ce6..419a19a9ebb 100644
--- a/jstests/noPassthrough/client_metadata_log.js
+++ b/jstests/noPassthrough/client_metadata_log.js
@@ -3,64 +3,64 @@
* @tags: [requires_sharding]
*/
(function() {
- 'use strict';
+'use strict';
- let checkLog = function(conn) {
- let coll = conn.getCollection("test.foo");
- assert.writeOK(coll.insert({_id: 1}));
+let checkLog = function(conn) {
+ let coll = conn.getCollection("test.foo");
+ assert.writeOK(coll.insert({_id: 1}));
- print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
- let log = cat(conn.fullOptions.logFile);
+ print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
+ let log = cat(conn.fullOptions.logFile);
- assert(
- /received client metadata from .*: { application: { name: ".*" }, driver: { name: ".*", version: ".*" }, os: { type: ".*", name: ".*", architecture: ".*", version: ".*" } }/
- .test(log),
- "'received client metadata' log line missing in log file!\n" + "Log file contents: " +
- conn.fullOptions.logFile +
- "\n************************************************************\n" + log +
- "\n************************************************************");
- };
+ assert(
+ /received client metadata from .*: { application: { name: ".*" }, driver: { name: ".*", version: ".*" }, os: { type: ".*", name: ".*", architecture: ".*", version: ".*" } }/
+ .test(log),
+ "'received client metadata' log line missing in log file!\n" +
+ "Log file contents: " + conn.fullOptions.logFile +
+ "\n************************************************************\n" + log +
+ "\n************************************************************");
+};
- // Test MongoD
- let testMongoD = function() {
- let conn = MongoRunner.runMongod({useLogFiles: true});
- assert.neq(null, conn, 'mongod was unable to start up');
+// Test MongoD
+let testMongoD = function() {
+ let conn = MongoRunner.runMongod({useLogFiles: true});
+ assert.neq(null, conn, 'mongod was unable to start up');
- checkLog(conn);
+ checkLog(conn);
- MongoRunner.stopMongod(conn);
- };
+ MongoRunner.stopMongod(conn);
+};
- // Test MongoS
- let testMongoS = function() {
- let options = {
- mongosOptions: {useLogFiles: true},
- };
+// Test MongoS
+let testMongoS = function() {
+ let options = {
+ mongosOptions: {useLogFiles: true},
+ };
- let st = new ShardingTest({shards: 1, mongos: 1, other: options});
+ let st = new ShardingTest({shards: 1, mongos: 1, other: options});
- checkLog(st.s0);
+ checkLog(st.s0);
- // Validate db.currentOp() contains mongos information
- let curOp = st.s0.adminCommand({currentOp: 1});
- print(tojson(curOp));
+ // Validate db.currentOp() contains mongos information
+ let curOp = st.s0.adminCommand({currentOp: 1});
+ print(tojson(curOp));
- var inprogSample = null;
- for (let inprog of curOp.inprog) {
- if (inprog.hasOwnProperty("clientMetadata") &&
- inprog.clientMetadata.hasOwnProperty("mongos")) {
- inprogSample = inprog;
- break;
- }
+ var inprogSample = null;
+ for (let inprog of curOp.inprog) {
+ if (inprog.hasOwnProperty("clientMetadata") &&
+ inprog.clientMetadata.hasOwnProperty("mongos")) {
+ inprogSample = inprog;
+ break;
}
+ }
- assert.neq(inprogSample.clientMetadata.mongos.host, "unknown");
- assert.neq(inprogSample.clientMetadata.mongos.client, "unknown");
- assert.neq(inprogSample.clientMetadata.mongos.version, "unknown");
+ assert.neq(inprogSample.clientMetadata.mongos.host, "unknown");
+ assert.neq(inprogSample.clientMetadata.mongos.client, "unknown");
+ assert.neq(inprogSample.clientMetadata.mongos.version, "unknown");
- st.stop();
- };
+ st.stop();
+};
- testMongoD();
- testMongoS();
+testMongoD();
+testMongoS();
})();
diff --git a/jstests/noPassthrough/client_metadata_slowlog.js b/jstests/noPassthrough/client_metadata_slowlog.js
index 993d7c47914..aab419023fe 100644
--- a/jstests/noPassthrough/client_metadata_slowlog.js
+++ b/jstests/noPassthrough/client_metadata_slowlog.js
@@ -2,32 +2,32 @@
* Test that verifies client metadata is logged as part of slow query logging in MongoD.
*/
(function() {
- 'use strict';
+'use strict';
- let conn = MongoRunner.runMongod({useLogFiles: true});
- assert.neq(null, conn, 'mongod was unable to start up');
+let conn = MongoRunner.runMongod({useLogFiles: true});
+assert.neq(null, conn, 'mongod was unable to start up');
- let coll = conn.getCollection("test.foo");
- assert.writeOK(coll.insert({_id: 1}));
+let coll = conn.getCollection("test.foo");
+assert.writeOK(coll.insert({_id: 1}));
- // Do a really slow query beyond the 100ms threshold
- let count = coll.count({
- $where: function() {
- sleep(1000);
- return true;
- }
- });
- assert.eq(count, 1, "expected 1 document");
+// Do a really slow query beyond the 100ms threshold
+let count = coll.count({
+ $where: function() {
+ sleep(1000);
+ return true;
+ }
+});
+assert.eq(count, 1, "expected 1 document");
- print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
- let log = cat(conn.fullOptions.logFile);
- assert(
- /COMMAND .* command test.foo appName: "MongoDB Shell" command: count { count: "foo", query: { \$where: function\(\)/
- .test(log),
- "'slow query' log line missing in mongod log file!\n" + "Log file contents: " +
- conn.fullOptions.logFile +
- "\n************************************************************\n" + log +
- "\n************************************************************");
+print(`Checking ${conn.fullOptions.logFile} for client metadata message`);
+let log = cat(conn.fullOptions.logFile);
+assert(
+ /COMMAND .* command test.foo appName: "MongoDB Shell" command: count { count: "foo", query: { \$where: function\(\)/
+ .test(log),
+ "'slow query' log line missing in mongod log file!\n" +
+ "Log file contents: " + conn.fullOptions.logFile +
+ "\n************************************************************\n" + log +
+ "\n************************************************************");
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js b/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js
index 9e138eda22a..ebfd9456121 100644
--- a/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js
+++ b/jstests/noPassthrough/closeAll_with_background_ops_fails_safely.js
@@ -6,40 +6,40 @@
*/
(function() {
- "use strict";
- let replSet = new ReplSetTest({name: "server35671", nodes: 1});
- let setFailpointBool = (failpointName, alwaysOn, times) => {
- if (times) {
- return db.adminCommand({configureFailPoint: failpointName, mode: {"times": times}});
- } else if (alwaysOn) {
- return db.adminCommand({configureFailPoint: failpointName, mode: "alwaysOn"});
- } else {
- return db.adminCommand({configureFailPoint: failpointName, mode: "off"});
- }
- };
- replSet.startSet();
- replSet.initiate();
- var db = replSet.getPrimary();
- setFailpointBool("hangAfterStartingIndexBuildUnlocked", true);
+"use strict";
+let replSet = new ReplSetTest({name: "server35671", nodes: 1});
+let setFailpointBool = (failpointName, alwaysOn, times) => {
+ if (times) {
+ return db.adminCommand({configureFailPoint: failpointName, mode: {"times": times}});
+ } else if (alwaysOn) {
+ return db.adminCommand({configureFailPoint: failpointName, mode: "alwaysOn"});
+ } else {
+ return db.adminCommand({configureFailPoint: failpointName, mode: "off"});
+ }
+};
+replSet.startSet();
+replSet.initiate();
+var db = replSet.getPrimary();
+setFailpointBool("hangAfterStartingIndexBuildUnlocked", true);
- // Blocks because of failpoint
- var join = startParallelShell("db.coll.createIndex({a: 1, b: 1}, {background: true})",
- replSet.ports[0]);
+// Blocks because of failpoint
+var join =
+ startParallelShell("db.coll.createIndex({a: 1, b: 1}, {background: true})", replSet.ports[0]);
- // Let the createIndex start to run.
- assert.soon(function() {
- // Need to do getDB because getPrimary returns something slightly different.
- let res = db.getDB("test").currentOp({"command.createIndexes": "coll"});
- return res['ok'] === 1 && res["inprog"].length > 0;
- });
+// Let the createIndex start to run.
+assert.soon(function() {
+ // Need to do getDB because getPrimary returns something slightly different.
+ let res = db.getDB("test").currentOp({"command.createIndexes": "coll"});
+ return res['ok'] === 1 && res["inprog"].length > 0;
+});
- // Repeated calls should continue to fail without crashing.
- assert.commandFailed(db.adminCommand({restartCatalog: 1}));
- assert.commandFailed(db.adminCommand({restartCatalog: 1}));
- assert.commandFailed(db.adminCommand({restartCatalog: 1}));
+// Repeated calls should continue to fail without crashing.
+assert.commandFailed(db.adminCommand({restartCatalog: 1}));
+assert.commandFailed(db.adminCommand({restartCatalog: 1}));
+assert.commandFailed(db.adminCommand({restartCatalog: 1}));
- // Unset failpoint so we can join the parallel shell.
- setFailpointBool("hangAfterStartingIndexBuildUnlocked", false);
- join();
- replSet.stopSet();
+// Unset failpoint so we can join the parallel shell.
+setFailpointBool("hangAfterStartingIndexBuildUnlocked", false);
+join();
+replSet.stopSet();
})();
diff --git a/jstests/noPassthrough/coll_mod_apply_ops.js b/jstests/noPassthrough/coll_mod_apply_ops.js
index d5e1cc7e4e7..27ced6b1069 100644
--- a/jstests/noPassthrough/coll_mod_apply_ops.js
+++ b/jstests/noPassthrough/coll_mod_apply_ops.js
@@ -2,43 +2,43 @@
// in applyOps.
(function() {
- "use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up with empty options");
+"use strict";
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up with empty options");
- let dbCollModName = "db_coll_mod";
- const dbCollMod = conn.getDB(dbCollModName);
- dbCollMod.dropDatabase();
- let collName = "collModTest";
- let coll = dbCollMod[collName];
+let dbCollModName = "db_coll_mod";
+const dbCollMod = conn.getDB(dbCollModName);
+dbCollMod.dropDatabase();
+let collName = "collModTest";
+let coll = dbCollMod[collName];
- // Generate a random UUID that is distinct from collModTest's UUID.
- const randomUUID = UUID();
- assert.neq(randomUUID, coll.uuid);
+// Generate a random UUID that is distinct from collModTest's UUID.
+const randomUUID = UUID();
+assert.neq(randomUUID, coll.uuid);
- // Perform a collMod to initialize validationLevel to "off".
- assert.commandWorked(dbCollMod.createCollection(collName));
- let cmd = {"collMod": collName, "validationLevel": "off"};
- let res = dbCollMod.runCommand(cmd);
- assert.commandWorked(res, 'could not run ' + tojson(cmd));
- let collectionInfosOriginal = dbCollMod.getCollectionInfos()[0];
- assert.eq(collectionInfosOriginal.options.validationLevel, "off");
+// Perform a collMod to initialize validationLevel to "off".
+assert.commandWorked(dbCollMod.createCollection(collName));
+let cmd = {"collMod": collName, "validationLevel": "off"};
+let res = dbCollMod.runCommand(cmd);
+assert.commandWorked(res, 'could not run ' + tojson(cmd));
+let collectionInfosOriginal = dbCollMod.getCollectionInfos()[0];
+assert.eq(collectionInfosOriginal.options.validationLevel, "off");
- // Perform an applyOps command with a nonexistent UUID and the same name as an existing
- // collection. applyOps should succeed because of idempotency but a NamespaceNotFound
- // uassert should be thrown during collMod application.
- let collModApplyOpsEntry = {
- "v": 2,
- "op": "c",
- "ns": dbCollModName + ".$cmd",
- "ui": randomUUID,
- "o2": {"collectionOptions_old": {"uuid": randomUUID}},
- "o": {"collMod": collName, "validationLevel": "moderate"}
- };
- assert.commandWorked(dbCollMod.adminCommand({"applyOps": [collModApplyOpsEntry]}));
+// Perform an applyOps command with a nonexistent UUID and the same name as an existing
+// collection. applyOps should succeed because of idempotency but a NamespaceNotFound
+// uassert should be thrown during collMod application.
+let collModApplyOpsEntry = {
+ "v": 2,
+ "op": "c",
+ "ns": dbCollModName + ".$cmd",
+ "ui": randomUUID,
+ "o2": {"collectionOptions_old": {"uuid": randomUUID}},
+ "o": {"collMod": collName, "validationLevel": "moderate"}
+};
+assert.commandWorked(dbCollMod.adminCommand({"applyOps": [collModApplyOpsEntry]}));
- // Ensure the collection options of the existing collection were not affected.
- assert.eq(dbCollMod.getCollectionInfos()[0].name, collName);
- assert.eq(dbCollMod.getCollectionInfos()[0].options.validationLevel, "off");
- MongoRunner.stopMongod(conn);
+// Ensure the collection options of the existing collection were not affected.
+assert.eq(dbCollMod.getCollectionInfos()[0].name, collName);
+assert.eq(dbCollMod.getCollectionInfos()[0].options.validationLevel, "off");
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/collation_clone_collection.js b/jstests/noPassthrough/collation_clone_collection.js
index 538b49d2077..14a729eb518 100644
--- a/jstests/noPassthrough/collation_clone_collection.js
+++ b/jstests/noPassthrough/collation_clone_collection.js
@@ -3,74 +3,74 @@
* used when filtering the source collection.
*/
(function() {
- "use strict";
+"use strict";
- var source = MongoRunner.runMongod({});
- assert.neq(null, source, "mongod was unable to start up");
+var source = MongoRunner.runMongod({});
+assert.neq(null, source, "mongod was unable to start up");
- var dest = MongoRunner.runMongod({});
- assert.neq(null, dest, "mongod was unable to start up");
+var dest = MongoRunner.runMongod({});
+assert.neq(null, dest, "mongod was unable to start up");
- var sourceColl = source.getDB("test").collation;
- var destColl = dest.getDB("test").collation;
+var sourceColl = source.getDB("test").collation;
+var destColl = dest.getDB("test").collation;
- assert.commandWorked(sourceColl.getDB().runCommand(
- {create: sourceColl.getName(), collation: {locale: "en", strength: 2}}));
- // We remove UUIDs before comparing as collection cloning results in a new UUID.
- var sourceCollectionInfos =
- sourceColl.getDB().getCollectionInfos({name: sourceColl.getName()}).map((collInfo) => {
- delete collInfo.info.uuid;
- return collInfo;
- });
-
- assert.writeOK(sourceColl.insert({_id: "FOO"}));
- assert.writeOK(sourceColl.insert({_id: "bar"}));
- assert.eq([{_id: "FOO"}],
- sourceColl.find({_id: "foo"}).toArray(),
- "query should have performed a case-insensitive match");
-
- assert.commandWorked(
- sourceColl.createIndex({withSimpleCollation: 1}, {collation: {locale: "simple"}}));
- assert.commandWorked(sourceColl.createIndex({withDefaultCollation: 1}));
- assert.commandWorked(
- sourceColl.createIndex({withNonDefaultCollation: 1}, {collation: {locale: "fr"}}));
- var sourceIndexInfos = sourceColl.getIndexes().map(function(indexInfo) {
- // We remove the "ns" field from the index specification when comparing whether the indexes
- // that were cloned are equivalent because they were built on a different namespace.
- delete indexInfo.ns;
- return indexInfo;
+assert.commandWorked(sourceColl.getDB().runCommand(
+ {create: sourceColl.getName(), collation: {locale: "en", strength: 2}}));
+// We remove UUIDs before comparing as collection cloning results in a new UUID.
+var sourceCollectionInfos =
+ sourceColl.getDB().getCollectionInfos({name: sourceColl.getName()}).map((collInfo) => {
+ delete collInfo.info.uuid;
+ return collInfo;
});
- // Test that the "cloneCollection" command respects the collection-default collation.
- destColl.drop();
- assert.commandWorked(destColl.getDB().runCommand({
- cloneCollection: sourceColl.getFullName(),
- from: sourceColl.getMongo().host,
- query: {_id: "foo"}
- }));
+assert.writeOK(sourceColl.insert({_id: "FOO"}));
+assert.writeOK(sourceColl.insert({_id: "bar"}));
+assert.eq([{_id: "FOO"}],
+ sourceColl.find({_id: "foo"}).toArray(),
+ "query should have performed a case-insensitive match");
- var destCollectionInfos =
- destColl.getDB().getCollectionInfos({name: destColl.getName()}).map((collInfo) => {
- delete collInfo.info.uuid;
- return collInfo;
- });
- assert.eq(sourceCollectionInfos, destCollectionInfos);
- assert.eq([{_id: "FOO"}], destColl.find({}).toArray());
+assert.commandWorked(
+ sourceColl.createIndex({withSimpleCollation: 1}, {collation: {locale: "simple"}}));
+assert.commandWorked(sourceColl.createIndex({withDefaultCollation: 1}));
+assert.commandWorked(
+ sourceColl.createIndex({withNonDefaultCollation: 1}, {collation: {locale: "fr"}}));
+var sourceIndexInfos = sourceColl.getIndexes().map(function(indexInfo) {
+ // We remove the "ns" field from the index specification when comparing whether the indexes
+ // that were cloned are equivalent because they were built on a different namespace.
+ delete indexInfo.ns;
+ return indexInfo;
+});
- var destIndexInfos = destColl.getIndexes().map(function(indexInfo) {
- // We remove the "ns" field from the index specification when comparing whether the indexes
- // that were cloned are equivalent because they were built on a different namespace.
- delete indexInfo.ns;
- return indexInfo;
+// Test that the "cloneCollection" command respects the collection-default collation.
+destColl.drop();
+assert.commandWorked(destColl.getDB().runCommand({
+ cloneCollection: sourceColl.getFullName(),
+ from: sourceColl.getMongo().host,
+ query: {_id: "foo"}
+}));
+
+var destCollectionInfos =
+ destColl.getDB().getCollectionInfos({name: destColl.getName()}).map((collInfo) => {
+ delete collInfo.info.uuid;
+ return collInfo;
});
+assert.eq(sourceCollectionInfos, destCollectionInfos);
+assert.eq([{_id: "FOO"}], destColl.find({}).toArray());
+
+var destIndexInfos = destColl.getIndexes().map(function(indexInfo) {
+ // We remove the "ns" field from the index specification when comparing whether the indexes
+ // that were cloned are equivalent because they were built on a different namespace.
+ delete indexInfo.ns;
+ return indexInfo;
+});
- assert.eq(sourceIndexInfos.length,
- destIndexInfos.length,
- "Number of indexes don't match; source: " + tojson(sourceIndexInfos) + ", dest: " +
- tojson(destIndexInfos));
- for (var i = 0; i < sourceIndexInfos.length; ++i) {
- assert.contains(sourceIndexInfos[i], destIndexInfos);
- }
- MongoRunner.stopMongod(source);
- MongoRunner.stopMongod(dest);
+assert.eq(sourceIndexInfos.length,
+ destIndexInfos.length,
+ "Number of indexes don't match; source: " + tojson(sourceIndexInfos) +
+ ", dest: " + tojson(destIndexInfos));
+for (var i = 0; i < sourceIndexInfos.length; ++i) {
+ assert.contains(sourceIndexInfos[i], destIndexInfos);
+}
+MongoRunner.stopMongod(source);
+MongoRunner.stopMongod(dest);
})();
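A hedged aside on the index collations being compared above: indexes created without an explicit collation inherit the collection-default collation, while {locale: "simple"} opts out of it. The standalone sketch below (collection name is a placeholder) only prints the resulting specs rather than asserting on them.

(function() {
"use strict";
const conn = MongoRunner.runMongod();
const coll = conn.getDB("test").collation_inherit_sketch;
assert.commandWorked(coll.getDB().createCollection(
    coll.getName(), {collation: {locale: "en", strength: 2}}));
assert.commandWorked(coll.createIndex({inherited: 1}));
assert.commandWorked(coll.createIndex({simple: 1}, {collation: {locale: "simple"}}));
// Expectation (not asserted here): the "inherited" spec carries the "en"/strength-2
// collation, while the "simple" spec carries no collation field at all.
printjson(coll.getIndexes().filter((spec) => spec.name !== "_id_"));
MongoRunner.stopMongod(conn);
})();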
diff --git a/jstests/noPassthrough/commands_handle_kill.js b/jstests/noPassthrough/commands_handle_kill.js
index 3838c90425c..6811bf77ec2 100644
--- a/jstests/noPassthrough/commands_handle_kill.js
+++ b/jstests/noPassthrough/commands_handle_kill.js
@@ -1,117 +1,115 @@
// Tests that commands properly handle their underlying plan executor failing or being killed.
(function() {
- 'use strict';
- const dbpath = MongoRunner.dataPath + jsTest.name();
- resetDbpath(dbpath);
- const mongod = MongoRunner.runMongod({dbpath: dbpath});
- const db = mongod.getDB("test");
- const collName = jsTest.name();
- const coll = db.getCollection(collName);
-
- // How many works it takes to yield.
- const yieldIterations = 2;
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: yieldIterations}));
- const nDocs = yieldIterations + 2;
-
- /**
- * Asserts that 'commandResult' indicates a command failure, and returns the error message.
- */
- function assertContainsErrorMessage(commandResult) {
- assert(commandResult.ok === 0 ||
- (commandResult.ok === 1 && commandResult.writeErrors !== undefined),
- 'expected command to fail: ' + tojson(commandResult));
- if (commandResult.ok === 0) {
- return commandResult.errmsg;
- } else {
- return commandResult.writeErrors[0].errmsg;
- }
+'use strict';
+const dbpath = MongoRunner.dataPath + jsTest.name();
+resetDbpath(dbpath);
+const mongod = MongoRunner.runMongod({dbpath: dbpath});
+const db = mongod.getDB("test");
+const collName = jsTest.name();
+const coll = db.getCollection(collName);
+
+// How many units of work it takes to yield.
+const yieldIterations = 2;
+assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: yieldIterations}));
+const nDocs = yieldIterations + 2;
+
+/**
+ * Asserts that 'commandResult' indicates a command failure, and returns the error message.
+ */
+function assertContainsErrorMessage(commandResult) {
+ assert(commandResult.ok === 0 ||
+ (commandResult.ok === 1 && commandResult.writeErrors !== undefined),
+ 'expected command to fail: ' + tojson(commandResult));
+ if (commandResult.ok === 0) {
+ return commandResult.errmsg;
+ } else {
+ return commandResult.writeErrors[0].errmsg;
}
+}
- function setupCollection() {
- coll.drop();
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < nDocs; i++) {
- bulk.insert({_id: i, a: i});
- }
- assert.writeOK(bulk.execute());
- assert.commandWorked(coll.createIndex({a: 1}));
+function setupCollection() {
+ coll.drop();
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < nDocs; i++) {
+ bulk.insert({_id: i, a: i});
}
+ assert.writeOK(bulk.execute());
+ assert.commandWorked(coll.createIndex({a: 1}));
+}
- /**
- * Asserts that the command given by 'cmdObj' will propagate a message from a PlanExecutor
- * failure back to the user.
- */
- function assertCommandPropogatesPlanExecutorFailure(cmdObj) {
- // Make sure the command propagates failure messages.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
- let res = db.runCommand(cmdObj);
- let errorMessage = assertContainsErrorMessage(res);
- assert.neq(errorMessage.indexOf("planExecutorAlwaysFails"),
- -1,
- "Expected error message to include 'planExecutorAlwaysFails', instead found: " +
- errorMessage);
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
- }
+/**
+ * Asserts that the command given by 'cmdObj' will propagate a message from a PlanExecutor
+ * failure back to the user.
+ */
+function assertCommandPropogatesPlanExecutorFailure(cmdObj) {
+ // Make sure the command propagates failure messages.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
+ let res = db.runCommand(cmdObj);
+ let errorMessage = assertContainsErrorMessage(res);
+ assert.neq(errorMessage.indexOf("planExecutorAlwaysFails"),
+ -1,
+ "Expected error message to include 'planExecutorAlwaysFails', instead found: " +
+ errorMessage);
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
+}
- /**
- * Asserts that the command properly handles failure scenarios while using its PlanExecutor.
- * Asserts that the appropriate error message is propagated if the is a failure during
- * execution, or if the plan was killed during execution. If 'options.commandYields' is false,
- * asserts that the PlanExecutor cannot be killed, and succeeds when run concurrently with any
- * of 'invalidatingCommands'.
- *
- * @param {Object} cmdObj - The command to run.
- * @param {Boolean} [options.commandYields=true] - Whether or not this command can yield during
- * execution.
- * @param {Object} [options.curOpFilter] - The query to use to find this operation in the
- * currentOp output. The default checks that all fields of cmdObj are in the curOp command.
- * @param {Function} [options.customSetup=undefined] - A callback to do any necessary setup
- * before the command can be run, like adding a geospatial index before a geoNear command.
- * @param {Boolean} [options.usesIndex] - True if this command should scan index {a: 1}, and
- * therefore should be killed if this index is dropped.
- */
- function assertCommandPropogatesPlanExecutorKillReason(cmdObj, options) {
- options = options || {};
-
- var curOpFilter = options.curOpFilter;
- if (!curOpFilter) {
- curOpFilter = {};
- for (var arg in cmdObj) {
- curOpFilter['command.' + arg] = {$eq: cmdObj[arg]};
- }
+/**
+ * Asserts that the command properly handles failure scenarios while using its PlanExecutor.
+ * Asserts that the appropriate error message is propagated if there is a failure during
+ * execution, or if the plan was killed during execution. If 'options.commandYields' is false,
+ * asserts that the PlanExecutor cannot be killed, and succeeds when run concurrently with any
+ * of 'invalidatingCommands'.
+ *
+ * @param {Object} cmdObj - The command to run.
+ * @param {Boolean} [options.commandYields=true] - Whether or not this command can yield during
+ * execution.
+ * @param {Object} [options.curOpFilter] - The query to use to find this operation in the
+ * currentOp output. The default checks that all fields of cmdObj are in the curOp command.
+ * @param {Function} [options.customSetup=undefined] - A callback to do any necessary setup
+ * before the command can be run, like adding a geospatial index before a geoNear command.
+ * @param {Boolean} [options.usesIndex] - True if this command should scan index {a: 1}, and
+ * therefore should be killed if this index is dropped.
+ */
+function assertCommandPropogatesPlanExecutorKillReason(cmdObj, options) {
+ options = options || {};
+
+ var curOpFilter = options.curOpFilter;
+ if (!curOpFilter) {
+ curOpFilter = {};
+ for (var arg in cmdObj) {
+ curOpFilter['command.' + arg] = {$eq: cmdObj[arg]};
}
+ }
- // These are commands that will cause all running PlanExecutors to be invalidated, and the
- // error messages that should be propagated when that happens.
- const invalidatingCommands = [
- {command: {dropDatabase: 1}, message: 'collection dropped'},
- {command: {drop: collName}, message: 'collection dropped'},
- ];
-
- if (options.usesIndex) {
- invalidatingCommands.push({
- command: {dropIndexes: collName, index: {a: 1}},
- message: 'index \'a_1\' dropped'
- });
- }
+ // These are commands that will cause all running PlanExecutors to be invalidated, and the
+ // error messages that should be propagated when that happens.
+ const invalidatingCommands = [
+ {command: {dropDatabase: 1}, message: 'collection dropped'},
+ {command: {drop: collName}, message: 'collection dropped'},
+ ];
+
+ if (options.usesIndex) {
+ invalidatingCommands.push(
+ {command: {dropIndexes: collName, index: {a: 1}}, message: 'index \'a_1\' dropped'});
+ }
- for (let invalidatingCommand of invalidatingCommands) {
- setupCollection();
- if (options.customSetup !== undefined) {
- options.customSetup();
- }
+ for (let invalidatingCommand of invalidatingCommands) {
+ setupCollection();
+ if (options.customSetup !== undefined) {
+ options.customSetup();
+ }
- // Enable a failpoint that causes PlanExecutors to hang during execution.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "alwaysOn"}));
+ // Enable a failpoint that causes PlanExecutors to hang during execution.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "alwaysOn"}));
- const canYield = options.commandYields === undefined || options.commandYields;
- // Start a parallel shell to run the command. This should hang until we unset the
- // failpoint.
- let awaitCmdFailure = startParallelShell(`
+ const canYield = options.commandYields === undefined || options.commandYields;
+ // Start a parallel shell to run the command. This should hang until we unset the
+ // failpoint.
+ let awaitCmdFailure = startParallelShell(`
let assertContainsErrorMessage = ${ assertContainsErrorMessage.toString() };
let res = db.runCommand(${ tojson(cmdObj) });
if (${ canYield }) {
@@ -130,94 +128,91 @@ if (${ canYield }) {
`,
mongod.port);
- // Wait until we can see the command running.
- assert.soon(
- function() {
- if (!canYield) {
- // The command won't yield, so we won't necessarily see it in currentOp.
- return true;
- }
- return db.currentOp({
- $and: [
- {
- ns: coll.getFullName(),
- numYields: {$gt: 0},
- },
- curOpFilter,
- ]
- }).inprog.length > 0;
- },
- function() {
- return 'expected to see command yielded in currentOp output. Command: ' +
- tojson(cmdObj) + '\n, currentOp output: ' + tojson(db.currentOp().inprog);
- });
-
- // Run the command that invalidates the PlanExecutor, then allow the PlanExecutor to
- // proceed.
- jsTestLog("Running invalidating command: " + tojson(invalidatingCommand.command));
- assert.commandWorked(db.runCommand(invalidatingCommand.command));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "off"}));
- awaitCmdFailure();
- }
+ // Wait until we can see the command running.
+ assert.soon(
+ function() {
+ if (!canYield) {
+ // The command won't yield, so we won't necessarily see it in currentOp.
+ return true;
+ }
+ return db.currentOp({
+ $and: [
+ {
+ ns: coll.getFullName(),
+ numYields: {$gt: 0},
+ },
+ curOpFilter,
+ ]
+ }).inprog.length > 0;
+ },
+ function() {
+ return 'expected to see command yielded in currentOp output. Command: ' +
+ tojson(cmdObj) + '\n, currentOp output: ' + tojson(db.currentOp().inprog);
+ });
- setupCollection();
- if (options.customSetup !== undefined) {
- options.customSetup();
- }
- assertCommandPropogatesPlanExecutorFailure(cmdObj);
+ // Run the command that invalidates the PlanExecutor, then allow the PlanExecutor to
+ // proceed.
+ jsTestLog("Running invalidating command: " + tojson(invalidatingCommand.command));
+ assert.commandWorked(db.runCommand(invalidatingCommand.command));
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "off"}));
+ awaitCmdFailure();
}
- // Disable aggregation's batching behavior, since that can prevent the PlanExecutor from being
- // active during the command that would have caused it to be killed.
- assert.commandWorked(
- db.adminCommand({setParameter: 1, internalDocumentSourceCursorBatchSizeBytes: 1}));
- assertCommandPropogatesPlanExecutorKillReason({aggregate: collName, pipeline: [], cursor: {}});
- assertCommandPropogatesPlanExecutorKillReason(
- {aggregate: collName, pipeline: [{$match: {a: {$gte: 0}}}], cursor: {}}, {usesIndex: true});
-
- assertCommandPropogatesPlanExecutorKillReason({dataSize: coll.getFullName()},
- {commandYields: false});
-
- assertCommandPropogatesPlanExecutorKillReason("dbHash", {commandYields: false});
-
- assertCommandPropogatesPlanExecutorKillReason({count: collName, query: {a: {$gte: 0}}},
- {usesIndex: true});
-
- assertCommandPropogatesPlanExecutorKillReason(
- {distinct: collName, key: "_id", query: {a: {$gte: 0}}}, {usesIndex: true});
-
- assertCommandPropogatesPlanExecutorKillReason(
- {findAndModify: collName, query: {fakeField: {$gt: 0}}, update: {$inc: {a: 1}}});
-
- assertCommandPropogatesPlanExecutorKillReason(
- {
- aggregate: collName,
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- distanceField: "dis"
- }
- }]
- },
- {
- customSetup: function() {
- assert.commandWorked(coll.createIndex({geoField: "2dsphere"}));
- }
- });
-
- assertCommandPropogatesPlanExecutorKillReason({find: coll.getName(), filter: {}});
- assertCommandPropogatesPlanExecutorKillReason({find: coll.getName(), filter: {a: {$gte: 0}}},
- {usesIndex: true});
-
- assertCommandPropogatesPlanExecutorKillReason(
- {update: coll.getName(), updates: [{q: {a: {$gte: 0}}, u: {$set: {a: 1}}}]},
- {curOpFilter: {op: 'update'}, usesIndex: true});
-
- assertCommandPropogatesPlanExecutorKillReason(
- {delete: coll.getName(), deletes: [{q: {a: {$gte: 0}}, limit: 0}]},
- {curOpFilter: {op: 'remove'}, usesIndex: true});
- MongoRunner.stopMongod(mongod);
+ setupCollection();
+ if (options.customSetup !== undefined) {
+ options.customSetup();
+ }
+ assertCommandPropogatesPlanExecutorFailure(cmdObj);
+}
+
+// Disable aggregation's batching behavior, since that can prevent the PlanExecutor from being
+// active during the command that would have caused it to be killed.
+assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalDocumentSourceCursorBatchSizeBytes: 1}));
+assertCommandPropogatesPlanExecutorKillReason({aggregate: collName, pipeline: [], cursor: {}});
+assertCommandPropogatesPlanExecutorKillReason(
+ {aggregate: collName, pipeline: [{$match: {a: {$gte: 0}}}], cursor: {}}, {usesIndex: true});
+
+assertCommandPropogatesPlanExecutorKillReason({dataSize: coll.getFullName()},
+ {commandYields: false});
+
+assertCommandPropogatesPlanExecutorKillReason("dbHash", {commandYields: false});
+
+assertCommandPropogatesPlanExecutorKillReason({count: collName, query: {a: {$gte: 0}}},
+ {usesIndex: true});
+
+assertCommandPropogatesPlanExecutorKillReason(
+ {distinct: collName, key: "_id", query: {a: {$gte: 0}}}, {usesIndex: true});
+
+assertCommandPropogatesPlanExecutorKillReason(
+ {findAndModify: collName, query: {fakeField: {$gt: 0}}, update: {$inc: {a: 1}}});
+
+assertCommandPropogatesPlanExecutorKillReason(
+ {
+ aggregate: collName,
+ cursor: {},
+ pipeline: [{
+ $geoNear:
+ {near: {type: "Point", coordinates: [0, 0]}, spherical: true, distanceField: "dis"}
+ }]
+ },
+ {
+ customSetup: function() {
+ assert.commandWorked(coll.createIndex({geoField: "2dsphere"}));
+ }
+ });
+
+assertCommandPropogatesPlanExecutorKillReason({find: coll.getName(), filter: {}});
+assertCommandPropogatesPlanExecutorKillReason({find: coll.getName(), filter: {a: {$gte: 0}}},
+ {usesIndex: true});
+
+assertCommandPropogatesPlanExecutorKillReason(
+ {update: coll.getName(), updates: [{q: {a: {$gte: 0}}, u: {$set: {a: 1}}}]},
+ {curOpFilter: {op: 'update'}, usesIndex: true});
+
+assertCommandPropogatesPlanExecutorKillReason(
+ {delete: coll.getName(), deletes: [{q: {a: {$gte: 0}}, limit: 0}]},
+ {curOpFilter: {op: 'remove'}, usesIndex: true});
+MongoRunner.stopMongod(mongod);
})();
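The test above layers many commands over one coordination pattern: force early yields, freeze executors at their yield points with a failpoint, invalidate them from another shell, then release and observe the error. Distilled into a standalone sketch (collection name and document count are placeholders), it looks roughly like this:

(function() {
"use strict";
const conn = MongoRunner.runMongod();
const testDB = conn.getDB("test");
const coll = testDB.kill_pattern_sketch;

// Make plans yield quickly so the failpoint below has a chance to trip.
assert.commandWorked(
    testDB.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 2}));
for (let i = 0; i < 4; i++) {
    assert.writeOK(coll.insert({a: i}));
}

// Freeze plan executors at their yield points.
assert.commandWorked(
    testDB.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "alwaysOn"}));

// Run the victim command in a parallel shell so this shell stays free to interfere.
const awaitShell = startParallelShell(function() {
    const res = db.getSiblingDB("test").runCommand({find: "kill_pattern_sketch", filter: {}});
    // The collection is dropped underneath the hung executor, so the command should fail.
    assert.commandFailed(res);
}, conn.port);

// Wait until the find has actually yielded and hung before invalidating it.
assert.soon(() => testDB.currentOp({ns: coll.getFullName(), numYields: {$gt: 0}})
                      .inprog.length > 0);

assert.commandWorked(testDB.runCommand({drop: "kill_pattern_sketch"}));
assert.commandWorked(
    testDB.adminCommand({configureFailPoint: "setYieldAllLocksHang", mode: "off"}));
awaitShell();
MongoRunner.stopMongod(conn);
})();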
diff --git a/jstests/noPassthrough/commands_preserve_exec_error_code.js b/jstests/noPassthrough/commands_preserve_exec_error_code.js
index fadb4d55116..3925b74b551 100644
--- a/jstests/noPassthrough/commands_preserve_exec_error_code.js
+++ b/jstests/noPassthrough/commands_preserve_exec_error_code.js
@@ -3,47 +3,46 @@
// 'InterruptedDueToReplStateChange',
// and also to ensure that the error is not swallowed and the diagnostic info is not lost.
(function() {
- "use strict";
+"use strict";
- const mongod = MongoRunner.runMongod({});
- assert.neq(mongod, null, "mongod failed to start up");
- const db = mongod.getDB("test");
- const coll = db.commands_preserve_exec_error_code;
- coll.drop();
+const mongod = MongoRunner.runMongod({});
+assert.neq(mongod, null, "mongod failed to start up");
+const db = mongod.getDB("test");
+const coll = db.commands_preserve_exec_error_code;
+coll.drop();
- assert.writeOK(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
- assert.commandWorked(coll.createIndex({geo: "2d"}));
+assert.writeOK(coll.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+assert.commandWorked(coll.createIndex({geo: "2d"}));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "alwaysOn"}));
- function assertFailsWithInternalError(fn) {
- const error = assert.throws(fn);
- assert.eq(error.code, ErrorCodes.InternalError, tojson(error));
- assert.neq(-1,
- error.message.indexOf("planExecutorAlwaysFails"),
- "Expected error message to be preserved");
- }
- function assertCmdFailsWithInternalError(cmd) {
- const res =
- assert.commandFailedWithCode(db.runCommand(cmd), ErrorCodes.InternalError, tojson(cmd));
- assert.neq(-1,
- res.errmsg.indexOf("planExecutorAlwaysFails"),
- "Expected error message to be preserved");
- }
+function assertFailsWithInternalError(fn) {
+ const error = assert.throws(fn);
+ assert.eq(error.code, ErrorCodes.InternalError, tojson(error));
+ assert.neq(-1,
+ error.message.indexOf("planExecutorAlwaysFails"),
+ "Expected error message to be preserved");
+}
+function assertCmdFailsWithInternalError(cmd) {
+ const res =
+ assert.commandFailedWithCode(db.runCommand(cmd), ErrorCodes.InternalError, tojson(cmd));
+ assert.neq(-1,
+ res.errmsg.indexOf("planExecutorAlwaysFails"),
+ "Expected error message to be preserved");
+}
- assertFailsWithInternalError(() => coll.find().itcount());
- assertFailsWithInternalError(() => coll.updateOne({_id: 1}, {$set: {x: 2}}));
- assertFailsWithInternalError(() => coll.deleteOne({_id: 1}));
- assertFailsWithInternalError(() => coll.count({_id: 1}));
- assertFailsWithInternalError(() => coll.aggregate([]).itcount());
- assertFailsWithInternalError(
- () => coll.aggregate([{$geoNear: {near: [0, 0], distanceField: "d"}}]).itcount());
- assertCmdFailsWithInternalError({distinct: coll.getName(), key: "_id"});
- assertCmdFailsWithInternalError(
- {findAndModify: coll.getName(), query: {_id: 1}, update: {$set: {x: 2}}});
+assertFailsWithInternalError(() => coll.find().itcount());
+assertFailsWithInternalError(() => coll.updateOne({_id: 1}, {$set: {x: 2}}));
+assertFailsWithInternalError(() => coll.deleteOne({_id: 1}));
+assertFailsWithInternalError(() => coll.count({_id: 1}));
+assertFailsWithInternalError(() => coll.aggregate([]).itcount());
+assertFailsWithInternalError(
+ () => coll.aggregate([{$geoNear: {near: [0, 0], distanceField: "d"}}]).itcount());
+assertCmdFailsWithInternalError({distinct: coll.getName(), key: "_id"});
+assertCmdFailsWithInternalError(
+ {findAndModify: coll.getName(), query: {_id: 1}, update: {$set: {x: 2}}});
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
- MongoRunner.stopMongod(mongod);
+assert.commandWorked(db.adminCommand({configureFailPoint: "planExecutorAlwaysFails", mode: "off"}));
+MongoRunner.stopMongod(mongod);
}());
diff --git a/jstests/noPassthrough/commit_quorum.js b/jstests/noPassthrough/commit_quorum.js
index 7d4366fc798..58183f1ab1c 100644
--- a/jstests/noPassthrough/commit_quorum.js
+++ b/jstests/noPassthrough/commit_quorum.js
@@ -4,97 +4,97 @@
* @tags: [requires_replication]
*/
(function() {
- load("jstests/noPassthrough/libs/index_build.js");
- load("jstests/libs/check_log.js");
-
- const replSet = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+load("jstests/noPassthrough/libs/index_build.js");
+load("jstests/libs/check_log.js");
+
+const replSet = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
-
- // Allow the createIndexes command to use the index builds coordinator in single-phase mode.
- replSet.startSet({setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: true}});
- replSet.initiate();
-
- const testDB = replSet.getPrimary().getDB('test');
- const coll = testDB.twoPhaseIndexBuild;
-
- const bulk = coll.initializeUnorderedBulkOp();
- const numDocs = 1000;
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({a: i, b: i});
- }
- assert.commandWorked(bulk.execute());
-
- const collName = "createIndexes";
-
- // Use createIndex(es) to build indexes and check the commit quorum.
- let res = assert.commandWorked(testDB[collName].createIndex({x: 1}));
- assert.eq(2, res.commitQuorum);
-
- res = assert.commandWorked(testDB[collName].createIndex({y: 1}, {}, 1));
- assert.eq(1, res.commitQuorum);
-
- res = assert.commandWorked(testDB[collName].createIndexes([{i: 1}]));
- assert.eq(2, res.commitQuorum);
-
- res = assert.commandWorked(testDB[collName].createIndexes([{j: 1}], {}, 1));
- assert.eq(1, res.commitQuorum);
-
- replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
-
- let awaitShell;
- try {
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangAfterIndexBuildSecondDrain", mode: "alwaysOn"}));
-
- // Starts parallel shell to run the command that will hang.
- awaitShell = startParallelShell(function() {
- // Use the index builds coordinator for a two-phase index build.
- assert.commandWorked(db.runCommand({
- twoPhaseCreateIndexes: 'twoPhaseIndexBuild',
- indexes: [{key: {a: 1}, name: 'a_1'}],
- commitQuorum: "majority"
- }));
- }, testDB.getMongo().port);
-
- checkLog.containsWithCount(replSet.getPrimary(), "Waiting for index build to complete", 5);
-
- // Test setting various commit quorums on the index build in our two node replica set.
- assert.commandFailed(testDB.runCommand(
- {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 3}));
- assert.commandFailed(testDB.runCommand({
- setIndexCommitQuorum: 'twoPhaseIndexBuild',
- indexNames: ['a_1'],
- commitQuorum: "someTag"
- }));
+ },
+ ]
+});
- assert.commandWorked(testDB.runCommand(
- {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 0}));
- assert.commandWorked(testDB.runCommand(
- {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 2}));
- assert.commandWorked(testDB.runCommand({
- setIndexCommitQuorum: 'twoPhaseIndexBuild',
- indexNames: ['a_1'],
- commitQuorum: "majority"
- }));
- } finally {
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangAfterIndexBuildSecondDrain", mode: "off"}));
- }
+// Allow the createIndexes command to use the index builds coordinator in single-phase mode.
+replSet.startSet({setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: true}});
+replSet.initiate();
+
+const testDB = replSet.getPrimary().getDB('test');
+const coll = testDB.twoPhaseIndexBuild;
+
+const bulk = coll.initializeUnorderedBulkOp();
+const numDocs = 1000;
+for (let i = 0; i < numDocs; i++) {
+ bulk.insert({a: i, b: i});
+}
+assert.commandWorked(bulk.execute());
+
+const collName = "createIndexes";
- // Wait for the parallel shell to complete.
- awaitShell();
+// Use createIndex(es) to build indexes and check the commit quorum.
+let res = assert.commandWorked(testDB[collName].createIndex({x: 1}));
+assert.eq(2, res.commitQuorum);
- IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
+res = assert.commandWorked(testDB[collName].createIndex({y: 1}, {}, 1));
+assert.eq(1, res.commitQuorum);
- replSet.stopSet();
+res = assert.commandWorked(testDB[collName].createIndexes([{i: 1}]));
+assert.eq(2, res.commitQuorum);
+
+res = assert.commandWorked(testDB[collName].createIndexes([{j: 1}], {}, 1));
+assert.eq(1, res.commitQuorum);
+
+replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
+
+let awaitShell;
+try {
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangAfterIndexBuildSecondDrain", mode: "alwaysOn"}));
+
+ // Starts parallel shell to run the command that will hang.
+ awaitShell = startParallelShell(function() {
+ // Use the index builds coordinator for a two-phase index build.
+ assert.commandWorked(db.runCommand({
+ twoPhaseCreateIndexes: 'twoPhaseIndexBuild',
+ indexes: [{key: {a: 1}, name: 'a_1'}],
+ commitQuorum: "majority"
+ }));
+ }, testDB.getMongo().port);
+
+ checkLog.containsWithCount(replSet.getPrimary(), "Waiting for index build to complete", 5);
+
+ // Test setting various commit quorums on the index build in our two node replica set.
+ assert.commandFailed(testDB.runCommand(
+ {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 3}));
+ assert.commandFailed(testDB.runCommand({
+ setIndexCommitQuorum: 'twoPhaseIndexBuild',
+ indexNames: ['a_1'],
+ commitQuorum: "someTag"
+ }));
+
+ assert.commandWorked(testDB.runCommand(
+ {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 0}));
+ assert.commandWorked(testDB.runCommand(
+ {setIndexCommitQuorum: 'twoPhaseIndexBuild', indexNames: ['a_1'], commitQuorum: 2}));
+ assert.commandWorked(testDB.runCommand({
+ setIndexCommitQuorum: 'twoPhaseIndexBuild',
+ indexNames: ['a_1'],
+ commitQuorum: "majority"
+ }));
+} finally {
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "hangAfterIndexBuildSecondDrain", mode: "off"}));
+}
+
+// Wait for the parallel shell to complete.
+awaitShell();
+
+IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
+
+replSet.stopSet();
})();
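For reference, a sketch of the raw command form behind the createIndex(es) helper calls above. It assumes, as those helper calls suggest, that the shell helper's third argument maps to a command-level commitQuorum field when the index builds coordinator is enabled, and it only logs the reply instead of asserting on it; collection and index names are placeholders.

(function() {
"use strict";
// Standalone two-node replica set mirroring the setup above.
const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
rst.startSet({setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: true}});
rst.initiate();
const db = rst.getPrimary().getDB("test");
assert.writeOK(db.sketch.insert({k: 1}));

// Raw command form of the createIndex(es) shell helpers used above.
const res = assert.commandWorked(db.runCommand({
    createIndexes: "sketch",
    indexes: [{key: {k: 1}, name: "k_1"}],
    commitQuorum: "majority"
}));
jsTestLog("createIndexes reported commitQuorum: " + tojson(res.commitQuorum));
rst.stopSet();
})();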
diff --git a/jstests/noPassthrough/compression_options.js b/jstests/noPassthrough/compression_options.js
index c6f4ccadc68..db4b9b4d050 100644
--- a/jstests/noPassthrough/compression_options.js
+++ b/jstests/noPassthrough/compression_options.js
@@ -1,43 +1,42 @@
// Tests --networkMessageCompressors options.
(function() {
- 'use strict';
-
- var runTest = function(optionValue, expected) {
- jsTest.log("Testing with --networkMessageCompressors=\"" + optionValue + "\" expecting: " +
- expected);
- var mongo = MongoRunner.runMongod({networkMessageCompressors: optionValue});
- assert.commandWorked(mongo.adminCommand({isMaster: 1}));
- clearRawMongoProgramOutput();
- assert.eq(runMongoProgram("mongo",
- "--eval",
- "tostrictjson(db.isMaster());",
- "--port",
- mongo.port,
- "--networkMessageCompressors=snappy"),
- 0);
-
- var output = rawMongoProgramOutput()
- .split("\n")
- .map(function(str) {
- str = str.replace(/^sh[0-9]+\| /, "");
- if (!/^{/.test(str)) {
- return "";
- }
- return str;
- })
- .join("\n")
- .trim();
-
- output = JSON.parse(output);
-
- assert.eq(output.compression, expected);
- MongoRunner.stopMongod(mongo);
- };
-
- assert.isnull(MongoRunner.runMongod({networkMessageCompressors: "snappy,disabled"}));
-
- runTest("snappy", ["snappy"]);
- runTest("disabled", undefined);
-
+'use strict';
+
+var runTest = function(optionValue, expected) {
+ jsTest.log("Testing with --networkMessageCompressors=\"" + optionValue +
+ "\" expecting: " + expected);
+ var mongo = MongoRunner.runMongod({networkMessageCompressors: optionValue});
+ assert.commandWorked(mongo.adminCommand({isMaster: 1}));
+ clearRawMongoProgramOutput();
+ assert.eq(runMongoProgram("mongo",
+ "--eval",
+ "tostrictjson(db.isMaster());",
+ "--port",
+ mongo.port,
+ "--networkMessageCompressors=snappy"),
+ 0);
+
+ var output = rawMongoProgramOutput()
+ .split("\n")
+ .map(function(str) {
+ str = str.replace(/^sh[0-9]+\| /, "");
+ if (!/^{/.test(str)) {
+ return "";
+ }
+ return str;
+ })
+ .join("\n")
+ .trim();
+
+ output = JSON.parse(output);
+
+ assert.eq(output.compression, expected);
+ MongoRunner.stopMongod(mongo);
+};
+
+assert.isnull(MongoRunner.runMongod({networkMessageCompressors: "snappy,disabled"}));
+
+runTest("snappy", ["snappy"]);
+runTest("disabled", undefined);
}());
diff --git a/jstests/noPassthrough/configExpand_exec_digest.js b/jstests/noPassthrough/configExpand_exec_digest.js
index 90457f70dc8..e01c1fcd1d6 100644
--- a/jstests/noPassthrough/configExpand_exec_digest.js
+++ b/jstests/noPassthrough/configExpand_exec_digest.js
@@ -1,60 +1,57 @@
// Test config file expansion using EXEC with digests.
(function() {
- 'use strict';
-
- load('jstests/noPassthrough/libs/configExpand/lib.js');
-
- // hash === SHA256HMAC('12345', 'secret')
- const hash = 'f88c7ebe4740db59c873cecf5e1f18e3726a1ad64068a13d764b79028430ab0e';
-
- // Simple positive case.
- configExpandSuccess({
- setParameter: {
- scramIterationCount:
- {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563726574'}
- }
- });
-
- // Invalid digest length.
- configExpandFailure({
- setParameter: {
- scramIteratorCount:
- {__exec: makeReflectionCmd('12345'), digest: '123', digest_key: '736563726574'}
- }
- },
- /digest: Not a valid, even length hex string/);
-
- // Invalid characters.
- configExpandFailure({
- setParameter: {
- scramIteratorCount:
- {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563X26574'}
- }
- },
- /digest_key: Not a valid, even length hex string/);
-
- // Digest without key.
- configExpandFailure(
- {setParameter: {scramIteratorCount: {__exec: makeReflectionCmd('12345'), digest: hash}}},
- /digest requires digest_key/);
-
- // Empty digest_key.
- configExpandFailure({
- setParameter: {
- scramIteratorCount:
- {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: ''}
- }
- },
- /digest_key must not be empty/);
-
- // Mismatched digests.
- configExpandFailure({
- setParameter: {
- scramIteratorCount:
- {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563726575'}
- }
- },
- /does not match expected digest/);
-
+'use strict';
+
+load('jstests/noPassthrough/libs/configExpand/lib.js');
+
+// hash === SHA256HMAC('12345', 'secret')
+const hash = 'f88c7ebe4740db59c873cecf5e1f18e3726a1ad64068a13d764b79028430ab0e';
+
+// Simple positive case.
+configExpandSuccess({
+ setParameter: {
+ scramIterationCount:
+ {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563726574'}
+ }
+});
+
+// Invalid digest length.
+configExpandFailure({
+ setParameter: {
+ scramIteratorCount:
+ {__exec: makeReflectionCmd('12345'), digest: '123', digest_key: '736563726574'}
+ }
+},
+ /digest: Not a valid, even length hex string/);
+
+// Invalid characters.
+configExpandFailure({
+ setParameter: {
+ scramIteratorCount:
+ {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563X26574'}
+ }
+},
+ /digest_key: Not a valid, even length hex string/);
+
+// Digest without key.
+configExpandFailure(
+ {setParameter: {scramIteratorCount: {__exec: makeReflectionCmd('12345'), digest: hash}}},
+ /digest requires digest_key/);
+
+// Empty digest_key.
+configExpandFailure({
+ setParameter:
+ {scramIteratorCount: {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: ''}}
+},
+ /digest_key must not be empty/);
+
+// Mismatched digests.
+configExpandFailure({
+ setParameter: {
+ scramIteratorCount:
+ {__exec: makeReflectionCmd('12345'), digest: hash, digest_key: '736563726575'}
+ }
+},
+ /does not match expected digest/);
})();
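One more hedged case, not in this commit, to make the digest relationship explicit: 'digest' is the SHA-256 HMAC of the expansion output keyed by the hex-decoded digest_key ('736563726574' is the hex encoding of 'secret'). Keeping the key but changing the executed value should therefore fail the same check as the mismatched-key case above; makeReflectionCmd and configExpandFailure come from the loaded test library.

load('jstests/noPassthrough/libs/configExpand/lib.js');

const secretKeyHex = '736563726574';  // hex encoding of 'secret'
const digestOf12345 = 'f88c7ebe4740db59c873cecf5e1f18e3726a1ad64068a13d764b79028430ab0e';

// The digest above was computed over '12345', so reflecting '54321' instead is expected
// to trip the "does not match expected digest" path exercised in the test.
configExpandFailure({
    setParameter: {
        scramIterationCount:
            {__exec: makeReflectionCmd('54321'), digest: digestOf12345, digest_key: secretKeyHex}
    }
},
                    /does not match expected digest/);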
diff --git a/jstests/noPassthrough/configExpand_exec_noexpand.js b/jstests/noPassthrough/configExpand_exec_noexpand.js
index 03e147f036a..4b07036b9c6 100644
--- a/jstests/noPassthrough/configExpand_exec_noexpand.js
+++ b/jstests/noPassthrough/configExpand_exec_noexpand.js
@@ -1,27 +1,29 @@
// Test config file expansion using EXEC.
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- // Unexpected elements.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__exec: makeReflectionCmd('12345'), foo: 'bar'},
- }
- },
- /expansion block must contain only '__exec'/);
+// Unexpected elements.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__exec: makeReflectionCmd('12345'), foo: 'bar'},
+ }
+},
+ /expansion block must contain only '__exec'/);
- const sicReflect = {setParameter: {scramIterationCount: {__exec: makeReflectionCmd('12345')}}};
+const sicReflect = {
+ setParameter: {scramIterationCount: {__exec: makeReflectionCmd('12345')}}
+};
- // Positive test just to be sure this works in a basic case before testing negatives.
- configExpandSuccess(sicReflect);
+// Positive test just to be sure this works in a basic case before testing negatives.
+configExpandSuccess(sicReflect);
- // Expansion not enabled.
- configExpandFailure(sicReflect, /__exec support has not been enabled/, {configExpand: 'none'});
+// Expansion not enabled.
+configExpandFailure(sicReflect, /__exec support has not been enabled/, {configExpand: 'none'});
- // Expansion enabled, but not recursively.
- configExpandFailure({__exec: makeReflectionCmd(jsToYaml(sicReflect)), type: 'yaml'},
- /__exec support has not been enabled/);
+// Expansion enabled, but not recursively.
+configExpandFailure({__exec: makeReflectionCmd(jsToYaml(sicReflect)), type: 'yaml'},
+ /__exec support has not been enabled/);
})();
diff --git a/jstests/noPassthrough/configExpand_exec_permissions.js b/jstests/noPassthrough/configExpand_exec_permissions.js
index 2aed009eda9..4563d5d20f1 100644
--- a/jstests/noPassthrough/configExpand_exec_permissions.js
+++ b/jstests/noPassthrough/configExpand_exec_permissions.js
@@ -3,30 +3,32 @@
// but that's impractical in a test suite where we're not running as root.
(function() {
- 'use strict';
+'use strict';
- if (_isWindows()) {
- print("Skipping test on windows");
- return;
- }
+if (_isWindows()) {
+ print("Skipping test on windows");
+ return;
+}
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const sicReflect = {setParameter: {scramIterationCount: {__exec: makeReflectionCmd('12345')}}};
+const sicReflect = {
+ setParameter: {scramIterationCount: {__exec: makeReflectionCmd('12345')}}
+};
- // Positive test just to be sure this works in a basic case before testing negatives.
- configExpandSuccess(sicReflect, null, {configExpand: 'exec', chmod: 0o600});
+// Positive test just to be sure this works in a basic case before testing negatives.
+configExpandSuccess(sicReflect, null, {configExpand: 'exec', chmod: 0o600});
- // Still successful if readable by others, but not writable.
- configExpandSuccess(sicReflect, null, {configExpand: 'exec', chmod: 0o644});
+// Still successful if readable by others, but not writable.
+configExpandSuccess(sicReflect, null, {configExpand: 'exec', chmod: 0o644});
- // Fail if writable by others.
- const expect = /is writable by non-owner users/;
- configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o666});
- configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o622});
- configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o660});
- configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o606});
+// Fail if writable by others.
+const expect = /is writable by non-owner users/;
+configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o666});
+configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o622});
+configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o660});
+configExpandFailure(sicReflect, expect, {configExpand: 'exec', chmod: 0o606});
- // Explicitly world-readable/writable config file without expansions should be fine.
- configExpandSuccess({}, null, {configExpand: 'none', chmod: 0o666});
+// Explicitly world-readable/writable config file without expansions should be fine.
+configExpandSuccess({}, null, {configExpand: 'none', chmod: 0o666});
})();
diff --git a/jstests/noPassthrough/configExpand_exec_timeeout.js b/jstests/noPassthrough/configExpand_exec_timeeout.js
index 7434790fc3f..72108855d68 100644
--- a/jstests/noPassthrough/configExpand_exec_timeeout.js
+++ b/jstests/noPassthrough/configExpand_exec_timeeout.js
@@ -1,31 +1,31 @@
// Test config file expansion using EXEC.
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- // Sleep 10 seconds during request.
- configExpandSuccess({
- setParameter: {
- scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 10})},
- }
- });
+// Sleep 10 seconds during request.
+configExpandSuccess({
+ setParameter: {
+ scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 10})},
+ }
+});
- // Sleep 40 seconds during request, with default 30 second timeout.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 40})},
- }
- },
- /Timeout expired/);
+// Sleep 40 seconds during request, with default 30 second timeout.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 40})},
+ }
+},
+ /Timeout expired/);
- // Sleep 10 seconds during request, with custom 5 second timeout.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 10})},
- }
- },
- /Timeout expired/,
- {configExpandTimeoutSecs: 5});
+// Sleep 10 seconds during request, with custom 5 second timeout.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__exec: makeReflectionCmd('12345', {sleep: 10})},
+ }
+},
+ /Timeout expired/,
+ {configExpandTimeoutSecs: 5});
})();
diff --git a/jstests/noPassthrough/configExpand_exec_values.js b/jstests/noPassthrough/configExpand_exec_values.js
index 21b9e493ea1..f4c85b3713d 100644
--- a/jstests/noPassthrough/configExpand_exec_values.js
+++ b/jstests/noPassthrough/configExpand_exec_values.js
@@ -1,28 +1,27 @@
// Test config file expansion using EXEC.
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
-
- // Basic success case
- configExpandSuccess(
- {
- setParameter: {
- scramIterationCount: {__exec: makeReflectionCmd('12345')},
- scramSHA256IterationCount:
- {__exec: makeReflectionCmd("23456\n"), type: 'string', trim: 'whitespace'}
- }
- },
- function(admin) {
- const response = assert.commandWorked(admin.runCommand(
- {getParameter: 1, scramIterationCount: 1, scramSHA256IterationCount: 1}));
- assert.eq(response.scramIterationCount,
- 12345,
- "Incorrect derived config value for scramIterationCount");
- assert.eq(response.scramSHA256IterationCount,
- 23456,
- "Incorrect derived config value scramSHA256IterationCount");
- });
+load('jstests/noPassthrough/libs/configExpand/lib.js');
+// Basic success case
+configExpandSuccess(
+ {
+ setParameter: {
+ scramIterationCount: {__exec: makeReflectionCmd('12345')},
+ scramSHA256IterationCount:
+ {__exec: makeReflectionCmd("23456\n"), type: 'string', trim: 'whitespace'}
+ }
+ },
+ function(admin) {
+ const response = assert.commandWorked(admin.runCommand(
+ {getParameter: 1, scramIterationCount: 1, scramSHA256IterationCount: 1}));
+ assert.eq(response.scramIterationCount,
+ 12345,
+ "Incorrect derived config value for scramIterationCount");
+ assert.eq(response.scramSHA256IterationCount,
+ 23456,
+ "Incorrect derived config value scramSHA256IterationCount");
+ });
})();
diff --git a/jstests/noPassthrough/configExpand_exec_wholeconfig.js b/jstests/noPassthrough/configExpand_exec_wholeconfig.js
index 9fac3848271..f4c0cf5dd78 100644
--- a/jstests/noPassthrough/configExpand_exec_wholeconfig.js
+++ b/jstests/noPassthrough/configExpand_exec_wholeconfig.js
@@ -1,14 +1,14 @@
// Test config file expansion using EXEC at top level.
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const yamlConfig = jsToYaml({setParameter: {scramIterationCount: 12345}});
- configExpandSuccess({__exec: makeReflectionCmd(yamlConfig), type: 'yaml'}, function(admin) {
- const response =
- assert.commandWorked(admin.runCommand({getParameter: 1, scramIterationCount: 1}));
- assert.eq(response.scramIterationCount, 12345, "Incorrect derived config value");
- });
+const yamlConfig = jsToYaml({setParameter: {scramIterationCount: 12345}});
+configExpandSuccess({__exec: makeReflectionCmd(yamlConfig), type: 'yaml'}, function(admin) {
+ const response =
+ assert.commandWorked(admin.runCommand({getParameter: 1, scramIterationCount: 1}));
+ assert.eq(response.scramIterationCount, 12345, "Incorrect derived config value");
+});
})();
diff --git a/jstests/noPassthrough/configExpand_rest_noexpand.js b/jstests/noPassthrough/configExpand_rest_noexpand.js
index d80f4c33ae6..28200e032dd 100644
--- a/jstests/noPassthrough/configExpand_rest_noexpand.js
+++ b/jstests/noPassthrough/configExpand_rest_noexpand.js
@@ -2,37 +2,35 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const web = new ConfigExpandRestServer();
- web.start();
+const web = new ConfigExpandRestServer();
+web.start();
- // Unexpected elements.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__rest: web.getStringReflectionURL('12345'), foo: 'bar'},
- }
- },
- /expansion block must contain only '__rest'/);
+// Unexpected elements.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__rest: web.getStringReflectionURL('12345'), foo: 'bar'},
+ }
+},
+ /expansion block must contain only '__rest'/);
- const sicReflect = {
- setParameter: {scramIterationCount: {__rest: web.getStringReflectionURL('12345')}}
- };
+const sicReflect = {
+ setParameter: {scramIterationCount: {__rest: web.getStringReflectionURL('12345')}}
+};
- // Positive test just to be sure this works in a basic case before testing negatives.
- configExpandSuccess(sicReflect);
+// Positive test just to be sure this works in a basic case before testing negatives.
+configExpandSuccess(sicReflect);
- // Expansion not enabled.
- configExpandFailure(sicReflect, /__rest support has not been enabled/, {configExpand: 'none'});
+// Expansion not enabled.
+configExpandFailure(sicReflect, /__rest support has not been enabled/, {configExpand: 'none'});
- // Expansion enabled, but not recursively.
- configExpandFailure({
- __rest: web.getURL() + '/reflect/yaml?yaml=' + encodeURI(jsToYaml(sicReflect)),
- type: 'yaml'
- },
- /__rest support has not been enabled/);
+// Expansion enabled, but not recursively.
+configExpandFailure(
+ {__rest: web.getURL() + '/reflect/yaml?yaml=' + encodeURI(jsToYaml(sicReflect)), type: 'yaml'},
+ /__rest support has not been enabled/);
- web.stop();
+web.stop();
})();
diff --git a/jstests/noPassthrough/configExpand_rest_permissions.js b/jstests/noPassthrough/configExpand_rest_permissions.js
index 318dd083bab..49749dddb9e 100644
--- a/jstests/noPassthrough/configExpand_rest_permissions.js
+++ b/jstests/noPassthrough/configExpand_rest_permissions.js
@@ -2,34 +2,34 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- if (_isWindows()) {
- print("Skipping test on windows");
- return;
- }
+if (_isWindows()) {
+ print("Skipping test on windows");
+ return;
+}
- const web = new ConfigExpandRestServer();
- web.start();
+const web = new ConfigExpandRestServer();
+web.start();
- const sicReflect = {
- setParameter: {scramIterationCount: {__rest: web.getStringReflectionURL('12345')}}
- };
+const sicReflect = {
+ setParameter: {scramIterationCount: {__rest: web.getStringReflectionURL('12345')}}
+};
- // Positive test just to be sure this works in a basic case before testing negatives.
- configExpandSuccess(sicReflect, null, {configExpand: 'rest', chmod: 0o600});
+// Positive test just to be sure this works in a basic case before testing negatives.
+configExpandSuccess(sicReflect, null, {configExpand: 'rest', chmod: 0o600});
- // Still successful if writable by others, but not readable.
- configExpandSuccess(sicReflect, null, {configExpand: 'rest', chmod: 0o622});
+// Still successful if writable by others, but not readable.
+configExpandSuccess(sicReflect, null, {configExpand: 'rest', chmod: 0o622});
- // Fail if readable by others.
- const expect = /is readable by non-owner users/;
- configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o666});
- configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o644});
- configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o660});
- configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o606});
+// Fail if readable by others.
+const expect = /is readable by non-owner users/;
+configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o666});
+configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o644});
+configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o660});
+configExpandFailure(sicReflect, expect, {configExpand: 'rest', chmod: 0o606});
- web.stop();
+web.stop();
})();
diff --git a/jstests/noPassthrough/configExpand_rest_timeout.js b/jstests/noPassthrough/configExpand_rest_timeout.js
index 532ce4e6283..5c193c94de1 100644
--- a/jstests/noPassthrough/configExpand_rest_timeout.js
+++ b/jstests/noPassthrough/configExpand_rest_timeout.js
@@ -2,36 +2,36 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const web = new ConfigExpandRestServer();
- web.start();
+const web = new ConfigExpandRestServer();
+web.start();
- // Sleep 10 seconds during request.
- configExpandSuccess({
- setParameter: {
- scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 10})},
- }
- });
+// Sleep 10 seconds during request.
+configExpandSuccess({
+ setParameter: {
+ scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 10})},
+ }
+});
- // Sleep 40 seconds during request, with default 30 second timeout.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 40})},
- }
- },
- /Timeout was reached/);
+// Sleep 40 seconds during request, with default 30 second timeout.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 40})},
+ }
+},
+ /Timeout was reached/);
- // Sleep 10 seconds during request, with custom 5 second timeout.
- configExpandFailure({
- setParameter: {
- scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 10})},
- }
- },
- /Timeout was reached/,
- {configExpandTimeoutSecs: 5});
+// Sleep 10 seconds during request, with custom 5 second timeout.
+configExpandFailure({
+ setParameter: {
+ scramIterationCount: {__rest: web.getStringReflectionURL('12345', {sleep: 10})},
+ }
+},
+ /Timeout was reached/,
+ {configExpandTimeoutSecs: 5});
- web.stop();
+web.stop();
})();
diff --git a/jstests/noPassthrough/configExpand_rest_values.js b/jstests/noPassthrough/configExpand_rest_values.js
index 7aa56dbfb77..6ffebe592f7 100644
--- a/jstests/noPassthrough/configExpand_rest_values.js
+++ b/jstests/noPassthrough/configExpand_rest_values.js
@@ -2,47 +2,42 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const web = new ConfigExpandRestServer();
- web.start();
+const web = new ConfigExpandRestServer();
+web.start();
- // Basic success case
- configExpandSuccess({
+// Basic success case
+configExpandSuccess(
+ {
setParameter: {
scramIterationCount: {__rest: web.getStringReflectionURL('12345')},
scramSHA256IterationCount:
{__rest: web.getStringReflectionURL('23456'), type: 'string', trim: 'whitespace'}
}
},
- function(admin) {
- const response = assert.commandWorked(admin.runCommand({
- getParameter: 1,
- scramIterationCount: 1,
- scramSHA256IterationCount: 1
- }));
- assert.eq(response.scramIterationCount,
- 12345,
- "Incorrect derived config value for scramIterationCount");
- assert.eq(response.scramSHA256IterationCount,
- 23456,
- "Incorrect derived config value scramSHA256IterationCount");
- });
-
- // With digest
- // SHA256HMAC('12345', 'secret')
- const hash = 'f88c7ebe4740db59c873cecf5e1f18e3726a1ad64068a13d764b79028430ab0e';
- configExpandSuccess({
- setParameter: {
- scramIterationCount: {
- __rest: web.getStringReflectionURL('12345'),
- digest: hash,
- digest_key: '736563726574'
- }
- }
+ function(admin) {
+ const response = assert.commandWorked(admin.runCommand(
+ {getParameter: 1, scramIterationCount: 1, scramSHA256IterationCount: 1}));
+ assert.eq(response.scramIterationCount,
+ 12345,
+ "Incorrect derived config value for scramIterationCount");
+ assert.eq(response.scramSHA256IterationCount,
+ 23456,
+ "Incorrect derived config value scramSHA256IterationCount");
});
- web.stop();
+// With digest
+// SHA256HMAC('12345', 'secret')
+const hash = 'f88c7ebe4740db59c873cecf5e1f18e3726a1ad64068a13d764b79028430ab0e';
+configExpandSuccess({
+ setParameter: {
+ scramIterationCount:
+ {__rest: web.getStringReflectionURL('12345'), digest: hash, digest_key: '736563726574'}
+ }
+});
+
+web.stop();
})();
diff --git a/jstests/noPassthrough/configExpand_rest_wholeconfig.js b/jstests/noPassthrough/configExpand_rest_wholeconfig.js
index 9be592e5eff..e4d6b87cfdc 100644
--- a/jstests/noPassthrough/configExpand_rest_wholeconfig.js
+++ b/jstests/noPassthrough/configExpand_rest_wholeconfig.js
@@ -2,21 +2,21 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/configExpand/lib.js');
+load('jstests/noPassthrough/libs/configExpand/lib.js');
- const web = new ConfigExpandRestServer();
- web.start();
+const web = new ConfigExpandRestServer();
+web.start();
- const yamlConfig = jsToYaml({setParameter: {scramIterationCount: 12345}});
- configExpandSuccess(
- {__rest: web.getURL() + '/reflect/yaml?yaml=' + encodeURI(yamlConfig), type: 'yaml'},
- function(admin) {
- const response =
- assert.commandWorked(admin.runCommand({getParameter: 1, scramIterationCount: 1}));
- assert.eq(response.scramIterationCount, 12345, "Incorrect derived config value");
- });
+const yamlConfig = jsToYaml({setParameter: {scramIterationCount: 12345}});
+configExpandSuccess(
+ {__rest: web.getURL() + '/reflect/yaml?yaml=' + encodeURI(yamlConfig), type: 'yaml'},
+ function(admin) {
+ const response =
+ assert.commandWorked(admin.runCommand({getParameter: 1, scramIterationCount: 1}));
+ assert.eq(response.scramIterationCount, 12345, "Incorrect derived config value");
+ });
- web.stop();
+web.stop();
})();
diff --git a/jstests/noPassthrough/count_helper_read_preference.js b/jstests/noPassthrough/count_helper_read_preference.js
index 25aa019462f..28762ca26ee 100644
--- a/jstests/noPassthrough/count_helper_read_preference.js
+++ b/jstests/noPassthrough/count_helper_read_preference.js
@@ -1,50 +1,49 @@
// Tests that the read preference set on the connection is used when we call the count helper.
(function() {
- "use strict";
-
- var commandsRan = [];
-
- // Create a new DB object backed by a mock connection.
- function MockMongo() {
- this.getMinWireVersion = function getMinWireVersion() {
- return 0;
- };
-
- this.getMaxWireVersion = function getMaxWireVersion() {
- return 0;
- };
- }
- MockMongo.prototype = Mongo.prototype;
- MockMongo.prototype.runCommand = function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {ok: 1, n: 100};
+"use strict";
+
+var commandsRan = [];
+
+// Create a new DB object backed by a mock connection.
+function MockMongo() {
+ this.getMinWireVersion = function getMinWireVersion() {
+ return 0;
};
- const mockMongo = new MockMongo();
- var db = new DB(mockMongo, "test");
+ this.getMaxWireVersion = function getMaxWireVersion() {
+ return 0;
+ };
+}
+MockMongo.prototype = Mongo.prototype;
+MockMongo.prototype.runCommand = function(db, cmd, opts) {
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {ok: 1, n: 100};
+};
- // Attach a dummy implicit session because the mock connection cannot create sessions.
- db._session = new _DummyDriverSession(mockMongo);
+const mockMongo = new MockMongo();
+var db = new DB(mockMongo, "test");
- assert.eq(commandsRan.length, 0);
+// Attach a dummy implicit session because the mock connection cannot create sessions.
+db._session = new _DummyDriverSession(mockMongo);
- // Run a count with no readPref.
- db.getMongo().setReadPref(null);
- db.foo.count();
+assert.eq(commandsRan.length, 0);
- // Check that there is no readPref on the command document.
- assert.eq(commandsRan.length, 1);
- assert.docEq(commandsRan[0].cmd, {count: "foo", query: {}});
+// Run a count with no readPref.
+db.getMongo().setReadPref(null);
+db.foo.count();
- commandsRan = [];
+// Check that there is no readPref on the command document.
+assert.eq(commandsRan.length, 1);
+assert.docEq(commandsRan[0].cmd, {count: "foo", query: {}});
- // Run with readPref secondary.
- db.getMongo().setReadPref("secondary");
- db.foo.count();
+commandsRan = [];
- // Check that we have wrapped the command and attached the read preference.
- assert.eq(commandsRan.length, 1);
- assert.docEq(commandsRan[0].cmd,
- {query: {count: "foo", query: {}}, $readPreference: {mode: "secondary"}});
+// Run with readPref secondary.
+db.getMongo().setReadPref("secondary");
+db.foo.count();
+// Check that we have wrapped the command and attached the read preference.
+assert.eq(commandsRan.length, 1);
+assert.docEq(commandsRan[0].cmd,
+ {query: {count: "foo", query: {}}, $readPreference: {mode: "secondary"}});
})();
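
As a minimal sketch of the wrapping behaviour the test above asserts (the 'conn' connection and the collection name "foo" are assumptions for illustration, not part of the test), setting a read preference on a shell connection causes subsequent commands to be sent inside a 'query' envelope carrying a $readPreference field:

    // Sketch only: assumes 'conn' is a Mongo() connection to a replica set or mongos.
    conn.setReadPref("secondary");
    const res = conn.getDB("test").runCommand({count: "foo", query: {}});
    // The shell transmits roughly:
    //   {query: {count: "foo", query: {}}, $readPreference: {mode: "secondary"}}
    printjson(res);
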
diff --git a/jstests/noPassthrough/create_view_does_not_take_database_X.js b/jstests/noPassthrough/create_view_does_not_take_database_X.js
index e7615152d5a..e35cae01e10 100644
--- a/jstests/noPassthrough/create_view_does_not_take_database_X.js
+++ b/jstests/noPassthrough/create_view_does_not_take_database_X.js
@@ -5,29 +5,29 @@
*/
(function() {
- "use strict";
+"use strict";
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- let db = rst.getPrimary().getDB("test");
+let db = rst.getPrimary().getDB("test");
- assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase("test");
+const session = db.getMongo().startSession();
+const sessionDb = session.getDatabase("test");
- session.startTransaction();
- // This holds a database IX lock and a collection IX lock on "a".
- sessionDb.a.insert({y: 1});
+session.startTransaction();
+// This holds a database IX lock and a collection IX lock on "a".
+sessionDb.a.insert({y: 1});
- // This only requires database IX lock.
- assert.commandWorked(db.createView("view", "a", []));
+// This only requires database IX lock.
+assert.commandWorked(db.createView("view", "a", []));
- assert.eq(db.view.find().toArray().length, 1);
+assert.eq(db.view.find().toArray().length, 1);
- assert.commandWorked(session.commitTransaction_forTesting());
+assert.commandWorked(session.commitTransaction_forTesting());
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js b/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
index bed13b06bec..2183e6da600 100644
--- a/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
+++ b/jstests/noPassthrough/cross_user_getmore_has_no_side_effects.js
@@ -4,64 +4,76 @@
// @tags: [requires_sharding]
(function() {
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const st = new ShardingTest(
- {shards: 2, config: 1, other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}});
- const kDBName = "test";
- const adminDB = st.s.getDB('admin');
- const testDB = st.s.getDB(kDBName);
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const st = new ShardingTest(
+ {shards: 2, config: 1, other: {keyFile: "jstests/libs/key1", shardAsReplicaSet: false}});
+const kDBName = "test";
+const adminDB = st.s.getDB('admin');
+const testDB = st.s.getDB(kDBName);
- jsTest.authenticate(st.shard0);
+jsTest.authenticate(st.shard0);
- const adminUser = {db: "admin", username: "foo", password: "bar"};
- const userA = {db: "test", username: "a", password: "pwd"};
- const userB = {db: "test", username: "b", password: "pwd"};
+const adminUser = {
+ db: "admin",
+ username: "foo",
+ password: "bar"
+};
+const userA = {
+ db: "test",
+ username: "a",
+ password: "pwd"
+};
+const userB = {
+ db: "test",
+ username: "b",
+ password: "pwd"
+};
- function login(userObj) {
- st.s.getDB(userObj.db).auth(userObj.username, userObj.password);
- }
+function login(userObj) {
+ st.s.getDB(userObj.db).auth(userObj.username, userObj.password);
+}
- function logout(userObj) {
- st.s.getDB(userObj.db).runCommand({logout: 1});
- }
+function logout(userObj) {
+ st.s.getDB(userObj.db).runCommand({logout: 1});
+}
- adminDB.createUser(
- {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+adminDB.createUser(
+ {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
- login(adminUser);
+login(adminUser);
- let coll = testDB.security_501;
- coll.drop();
+let coll = testDB.security_501;
+coll.drop();
- for (let i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i}));
- }
+for (let i = 0; i < 100; i++) {
+ assert.writeOK(coll.insert({_id: i}));
+}
- // Create our two users.
- for (let user of[userA, userB]) {
- testDB.createUser({
- user: user.username,
- pwd: user.password,
- roles: [{role: "readWriteAnyDatabase", db: "admin"}]
- });
- }
- logout(adminUser);
+// Create our two users.
+for (let user of [userA, userB]) {
+ testDB.createUser({
+ user: user.username,
+ pwd: user.password,
+ roles: [{role: "readWriteAnyDatabase", db: "admin"}]
+ });
+}
+logout(adminUser);
- // As userA, run a find and get a cursor.
- login(userA);
- const cursorID =
- assert.commandWorked(testDB.runCommand({find: coll.getName(), batchSize: 2})).cursor.id;
- logout(userA);
+// As userA, run a find and get a cursor.
+login(userA);
+const cursorID =
+ assert.commandWorked(testDB.runCommand({find: coll.getName(), batchSize: 2})).cursor.id;
+logout(userA);
- // As userB, attempt to getMore the cursor ID.
- login(userB);
- assert.commandFailed(testDB.runCommand({getMore: cursorID, collection: coll.getName()}));
- logout(userB);
+// As userB, attempt to getMore the cursor ID.
+login(userB);
+assert.commandFailed(testDB.runCommand({getMore: cursorID, collection: coll.getName()}));
+logout(userB);
- // As user A again, try to getMore the cursor.
- login(userA);
- assert.commandWorked(testDB.runCommand({getMore: cursorID, collection: coll.getName()}));
- logout(userA);
+// As user A again, try to getMore the cursor.
+login(userA);
+assert.commandWorked(testDB.runCommand({getMore: cursorID, collection: coll.getName()}));
+logout(userA);
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/crud_timestamps.js b/jstests/noPassthrough/crud_timestamps.js
index a89e112ac29..07718be5bbc 100644
--- a/jstests/noPassthrough/crud_timestamps.js
+++ b/jstests/noPassthrough/crud_timestamps.js
@@ -5,107 +5,110 @@
//
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const testDB = rst.getPrimary().getDB(dbName);
- const coll = testDB.getCollection(collName);
-
- if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- rst.stopSet();
- return;
- }
-
- // Turn off timestamp reaping.
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn",
- }));
-
- const session = testDB.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase(dbName);
- const response = assert.commandWorked(testDB.createCollection("coll"));
- const startTime = response.operationTime;
-
- function check(atClusterTime, expected) {
- session.startTransaction({readConcern: {level: "snapshot", atClusterTime: atClusterTime}});
- // Check both a collection scan and scanning the _id index.
- [{$natural: 1}, {_id: 1}].forEach(sort => {
- let response = assert.commandWorked(
- sessionDb.runCommand({find: collName, sort: sort, singleBatch: true}));
- assert.eq(expected, response.cursor.firstBatch);
- });
- assert.commandWorked(session.commitTransaction_forTesting());
- }
-
- // insert
-
- let request = {insert: coll.getName(), documents: [{_id: 1}, {_id: 2}], ordered: false};
- assert.commandWorked(coll.runCommand(request));
-
- const oplog = rst.getPrimary().getDB("local").getCollection("oplog.rs");
- let ts1 = oplog.findOne({o: {_id: 1}}).ts;
- let ts2 = oplog.findOne({o: {_id: 2}}).ts;
-
- check(startTime, []);
- check(ts1, [{_id: 1}]);
- check(ts2, [{_id: 1}, {_id: 2}]);
-
- // upsert
-
- request = {
- update: coll.getName(),
- updates: [
- {q: {_id: 3, a: 1}, u: {$set: {a: 2}}, upsert: true},
- {q: {_id: 4, a: 1}, u: {$set: {a: 3}}, upsert: true}
- ],
- ordered: true
- };
- assert.commandWorked(coll.runCommand(request));
-
- ts1 = oplog.findOne({o: {_id: 3, a: 2}}).ts;
- ts2 = oplog.findOne({o: {_id: 4, a: 3}}).ts;
-
- check(ts1, [{_id: 1}, {_id: 2}, {_id: 3, a: 2}]);
- check(ts2, [{_id: 1}, {_id: 2}, {_id: 3, a: 2}, {_id: 4, a: 3}]);
-
- // update
-
- request = {
- update: coll.getName(),
- updates: [{q: {_id: 3, a: 2}, u: {$set: {a: 4}}}, {q: {_id: 4, a: 3}, u: {$set: {a: 5}}}],
- ordered: true
- };
- assert.commandWorked(coll.runCommand(request));
-
- ts1 = oplog.findOne({op: 'u', o2: {_id: 3}}).ts;
- ts2 = oplog.findOne({op: 'u', o2: {_id: 4}}).ts;
-
- check(ts1, [{_id: 1}, {_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 3}]);
- check(ts2, [{_id: 1}, {_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
-
- // delete
-
- request = {delete: coll.getName(), deletes: [{q: {}, limit: 0}], ordered: false};
-
- assert.commandWorked(coll.runCommand(request));
-
- ts1 = oplog.findOne({op: 'd', o: {_id: 1}}).ts;
- ts2 = oplog.findOne({op: 'd', o: {_id: 2}}).ts;
- let ts3 = oplog.findOne({op: 'd', o: {_id: 3}}).ts;
- let ts4 = oplog.findOne({op: 'd', o: {_id: 4}}).ts;
-
- check(ts1, [{_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
- check(ts2, [{_id: 3, a: 4}, {_id: 4, a: 5}]);
- check(ts3, [{_id: 4, a: 5}]);
- check(ts4, []);
-
- session.endSession();
- rst.stopSet();
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const testDB = rst.getPrimary().getDB(dbName);
+const coll = testDB.getCollection(collName);
+
+if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
+ rst.stopSet();
+ return;
+}
+
+// Turn off timestamp reaping.
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
+ mode: "alwaysOn",
+}));
+
+const session = testDB.getMongo().startSession({causalConsistency: false});
+const sessionDb = session.getDatabase(dbName);
+const response = assert.commandWorked(testDB.createCollection("coll"));
+const startTime = response.operationTime;
+
+function check(atClusterTime, expected) {
+ session.startTransaction({readConcern: {level: "snapshot", atClusterTime: atClusterTime}});
+ // Check both a collection scan and scanning the _id index.
+ [{$natural: 1}, {_id: 1}].forEach(sort => {
+ let response = assert.commandWorked(
+ sessionDb.runCommand({find: collName, sort: sort, singleBatch: true}));
+ assert.eq(expected, response.cursor.firstBatch);
+ });
+ assert.commandWorked(session.commitTransaction_forTesting());
+}
+
+// insert
+
+let request = {insert: coll.getName(), documents: [{_id: 1}, {_id: 2}], ordered: false};
+assert.commandWorked(coll.runCommand(request));
+
+const oplog = rst.getPrimary().getDB("local").getCollection("oplog.rs");
+let ts1 = oplog.findOne({o: {_id: 1}}).ts;
+let ts2 = oplog.findOne({o: {_id: 2}}).ts;
+
+check(startTime, []);
+check(ts1, [{_id: 1}]);
+check(ts2, [{_id: 1}, {_id: 2}]);
+
+// upsert
+
+request = {
+ update: coll.getName(),
+ updates: [
+ {q: {_id: 3, a: 1}, u: {$set: {a: 2}}, upsert: true},
+ {q: {_id: 4, a: 1}, u: {$set: {a: 3}}, upsert: true}
+ ],
+ ordered: true
+};
+assert.commandWorked(coll.runCommand(request));
+
+ts1 = oplog.findOne({o: {_id: 3, a: 2}}).ts;
+ts2 = oplog.findOne({o: {_id: 4, a: 3}}).ts;
+
+check(ts1, [{_id: 1}, {_id: 2}, {_id: 3, a: 2}]);
+check(ts2, [{_id: 1}, {_id: 2}, {_id: 3, a: 2}, {_id: 4, a: 3}]);
+
+// update
+
+request = {
+ update: coll.getName(),
+ updates: [{q: {_id: 3, a: 2}, u: {$set: {a: 4}}}, {q: {_id: 4, a: 3}, u: {$set: {a: 5}}}],
+ ordered: true
+};
+assert.commandWorked(coll.runCommand(request));
+
+ts1 = oplog.findOne({op: 'u', o2: {_id: 3}}).ts;
+ts2 = oplog.findOne({op: 'u', o2: {_id: 4}}).ts;
+
+check(ts1, [{_id: 1}, {_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 3}]);
+check(ts2, [{_id: 1}, {_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
+
+// delete
+
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {}, limit: 0}],
+ ordered: false
+};
+
+assert.commandWorked(coll.runCommand(request));
+
+ts1 = oplog.findOne({op: 'd', o: {_id: 1}}).ts;
+ts2 = oplog.findOne({op: 'd', o: {_id: 2}}).ts;
+let ts3 = oplog.findOne({op: 'd', o: {_id: 3}}).ts;
+let ts4 = oplog.findOne({op: 'd', o: {_id: 4}}).ts;
+
+check(ts1, [{_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]);
+check(ts2, [{_id: 3, a: 4}, {_id: 4, a: 5}]);
+check(ts3, [{_id: 4, a: 5}]);
+check(ts4, []);
+
+session.endSession();
+rst.stopSet();
}());
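
The pattern the test above relies on, reading collection contents as of a specific cluster time inside a snapshot transaction, can be distilled into a short sketch (the 'primary' connection and the 'clusterTime' Timestamp are assumptions; in the test they come from the replica set primary and its oplog entries):

    // Sketch only: 'primary' is assumed to be a connection to a replica set primary,
    // and 'clusterTime' an oplog Timestamp at which to read.
    const session = primary.startSession({causalConsistency: false});
    const sessionDb = session.getDatabase("test");
    session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
    const docs = sessionDb.coll.find().sort({_id: 1}).toArray();
    assert.commandWorked(session.commitTransaction_forTesting());
    session.endSession();
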
diff --git a/jstests/noPassthrough/currentop_active_cursor.js b/jstests/noPassthrough/currentop_active_cursor.js
index c0a37322050..9bfb27e2564 100644
--- a/jstests/noPassthrough/currentop_active_cursor.js
+++ b/jstests/noPassthrough/currentop_active_cursor.js
@@ -2,109 +2,107 @@
// Then test and make sure a pinned cursor shows up in the operation object.
// @tags: [requires_sharding]
(function() {
- "use strict";
- load("jstests/libs/pin_getmore_cursor.js"); // for "withPinnedCursor"
+"use strict";
+load("jstests/libs/pin_getmore_cursor.js"); // for "withPinnedCursor"
- function runTest(cursorId, coll) {
- const db = coll.getDB();
- const adminDB = db.getSiblingDB("admin");
- // Test that active cursors do not show up as idle cursors.
- const idleCursors =
- adminDB
- .aggregate([
- {"$currentOp": {"localOps": true, "idleCursors": true, "allUsers": false}},
- {"$match": {"type": "idleCursor"}}
- ])
- .toArray();
- assert.eq(idleCursors.length, 0, tojson(idleCursors));
- // Test that an active cursor shows up in currentOp.
- const activeCursors =
- adminDB
- .aggregate([
- {"$currentOp": {"localOps": true, "idleCursors": false, "allUsers": false}},
- {"$match": {"cursor": {"$exists": true}}}
- ])
- .toArray();
- assert.eq(activeCursors.length, 1, tojson(activeCursors));
- const cursorObject = activeCursors[0].cursor;
- assert.eq(cursorObject.originatingCommand.find, coll.getName(), tojson(activeCursors));
- assert.eq(cursorObject.nDocsReturned, 2, tojson(activeCursors));
- assert.eq(cursorObject.tailable, false, tojson(activeCursors));
- assert.eq(cursorObject.awaitData, false, tojson(activeCursors));
- }
- const conn = MongoRunner.runMongod({});
- let failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
- withPinnedCursor({
- conn: conn,
- sessionId: null,
- db: conn.getDB("test"),
- assertFunction: runTest,
- runGetMoreFunc: function() {
- const response =
- assert.commandWorked(db.runCommand({getMore: cursorId, collection: collName}));
- },
- failPointName: failPointName,
- assertEndCounts: true
- });
+function runTest(cursorId, coll) {
+ const db = coll.getDB();
+ const adminDB = db.getSiblingDB("admin");
+ // Test that active cursors do not show up as idle cursors.
+ const idleCursors =
+ adminDB
+ .aggregate([
+ {"$currentOp": {"localOps": true, "idleCursors": true, "allUsers": false}},
+ {"$match": {"type": "idleCursor"}}
+ ])
+ .toArray();
+ assert.eq(idleCursors.length, 0, tojson(idleCursors));
+ // Test that an active cursor shows up in currentOp.
+ const activeCursors =
+ adminDB
+ .aggregate([
+ {"$currentOp": {"localOps": true, "idleCursors": false, "allUsers": false}},
+ {"$match": {"cursor": {"$exists": true}}}
+ ])
+ .toArray();
+ assert.eq(activeCursors.length, 1, tojson(activeCursors));
+ const cursorObject = activeCursors[0].cursor;
+ assert.eq(cursorObject.originatingCommand.find, coll.getName(), tojson(activeCursors));
+ assert.eq(cursorObject.nDocsReturned, 2, tojson(activeCursors));
+ assert.eq(cursorObject.tailable, false, tojson(activeCursors));
+ assert.eq(cursorObject.awaitData, false, tojson(activeCursors));
+}
+const conn = MongoRunner.runMongod({});
+let failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
+withPinnedCursor({
+ conn: conn,
+ sessionId: null,
+ db: conn.getDB("test"),
+ assertFunction: runTest,
+ runGetMoreFunc: function() {
+ const response =
+ assert.commandWorked(db.runCommand({getMore: cursorId, collection: collName}));
+ },
+ failPointName: failPointName,
+ assertEndCounts: true
+});
- // Test OP_GET_MORE (legacy read mode) against a mongod.
- failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
- const db = conn.getDB("test");
- db.getMongo().forceReadMode("legacy");
- withPinnedCursor({
- conn: conn,
- sessionId: null,
- db: db,
- assertFunction: runTest,
- runGetMoreFunc: function() {
- db.getMongo().forceReadMode("legacy");
- let cmdRes = {
- "cursor": {"firstBatch": [], "id": cursorId, "ns": db.jstest_with_pinned_cursor},
- "ok": 1
- };
- let cursor = new DBCommandCursor(db, cmdRes, 2);
- cursor.itcount();
- },
- failPointName: failPointName,
- assertEndCounts: true
- });
- MongoRunner.stopMongod(conn);
+// Test OP_GET_MORE (legacy read mode) against a mongod.
+failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
+const db = conn.getDB("test");
+db.getMongo().forceReadMode("legacy");
+withPinnedCursor({
+ conn: conn,
+ sessionId: null,
+ db: db,
+ assertFunction: runTest,
+ runGetMoreFunc: function() {
+ db.getMongo().forceReadMode("legacy");
+ let cmdRes = {
+ "cursor": {"firstBatch": [], "id": cursorId, "ns": db.jstest_with_pinned_cursor},
+ "ok": 1
+ };
+ let cursor = new DBCommandCursor(db, cmdRes, 2);
+ cursor.itcount();
+ },
+ failPointName: failPointName,
+ assertEndCounts: true
+});
+MongoRunner.stopMongod(conn);
- // Sharded test
- failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
- let st = new ShardingTest({shards: 2, mongos: 1});
- withPinnedCursor({
- conn: st.s,
- sessionId: null,
- db: st.s.getDB("test"),
- assertFunction: runTest,
- runGetMoreFunc: function() {
- const response =
- assert.commandWorked(db.runCommand({getMore: cursorId, collection: collName}));
- },
- failPointName: failPointName,
- assertEndCounts: true
- });
-
- // Test OP_GET_MORE (legacy reead mode) against a mongos.
- withPinnedCursor({
- conn: st.s,
- sessionId: null,
- db: st.s.getDB("test"),
- assertFunction: runTest,
- runGetMoreFunc: function() {
- db.getMongo().forceReadMode("legacy");
- let cmdRes = {
- "cursor": {"firstBatch": [], "id": cursorId, "ns": db.jstest_with_pinned_cursor},
- "ok": 1
- };
- let cursor = new DBCommandCursor(db, cmdRes, 2);
- cursor.itcount();
-
- },
- failPointName: failPointName,
- assertEndCounts: true
- });
- st.stop();
+// Sharded test
+failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+let st = new ShardingTest({shards: 2, mongos: 1});
+withPinnedCursor({
+ conn: st.s,
+ sessionId: null,
+ db: st.s.getDB("test"),
+ assertFunction: runTest,
+ runGetMoreFunc: function() {
+ const response =
+ assert.commandWorked(db.runCommand({getMore: cursorId, collection: collName}));
+ },
+ failPointName: failPointName,
+ assertEndCounts: true
+});
+// Test OP_GET_MORE (legacy read mode) against a mongos.
+withPinnedCursor({
+ conn: st.s,
+ sessionId: null,
+ db: st.s.getDB("test"),
+ assertFunction: runTest,
+ runGetMoreFunc: function() {
+ db.getMongo().forceReadMode("legacy");
+ let cmdRes = {
+ "cursor": {"firstBatch": [], "id": cursorId, "ns": db.jstest_with_pinned_cursor},
+ "ok": 1
+ };
+ let cursor = new DBCommandCursor(db, cmdRes, 2);
+ cursor.itcount();
+ },
+ failPointName: failPointName,
+ assertEndCounts: true
+});
+st.stop();
})();
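
The $currentOp queries used above follow a common shape; a minimal sketch of listing idle cursors on a node (the 'conn' connection is an assumption) looks like this:

    // Sketch only: 'conn' is assumed to be a connection with at least one open cursor.
    const adminDB = conn.getDB("admin");
    const idleCursors =
        adminDB
            .aggregate([
                {$currentOp: {localOps: true, idleCursors: true, allUsers: false}},
                {$match: {type: "idleCursor"}}
            ])
            .toArray();
    printjson(idleCursors);
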
diff --git a/jstests/noPassthrough/currentop_active_transaction.js b/jstests/noPassthrough/currentop_active_transaction.js
index 01d11a367b8..f7e1d5bee78 100644
--- a/jstests/noPassthrough/currentop_active_transaction.js
+++ b/jstests/noPassthrough/currentop_active_transaction.js
@@ -5,188 +5,183 @@
*/
(function() {
- 'use strict';
- load("jstests/libs/parallel_shell_helpers.js");
-
- function transactionFn(isPrepared) {
- const collName = 'currentop_active_transaction';
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase('test');
-
- session.startTransaction({readConcern: {level: 'snapshot'}});
- sessionDB[collName].update({}, {x: 2});
- if (isPrepared) {
- // Load the prepare helpers to be called in the parallel shell.
- load('jstests/core/txns/libs/prepare_helpers.js');
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- } else {
- assert.commandWorked(session.commitTransaction_forTesting());
- }
- }
+'use strict';
+load("jstests/libs/parallel_shell_helpers.js");
- function checkCurrentOpFields(currentOp,
- isPrepared,
- operationTime,
- timeBeforeTransactionStarts,
- timeAfterTransactionStarts,
- timeBeforeCurrentOp) {
- const transactionDocument = currentOp[0].transaction;
- assert.eq(transactionDocument.parameters.autocommit,
- false,
- "Expected 'autocommit' to be false but got " +
- transactionDocument.parameters.autocommit + " instead: " +
- tojson(transactionDocument));
- assert.docEq(transactionDocument.parameters.readConcern,
- {level: 'snapshot'},
- "Expected 'readConcern' to be level: snapshot but got " +
- tojson(transactionDocument.parameters.readConcern) + " instead: " +
- tojson(transactionDocument));
- assert.gte(transactionDocument.readTimestamp,
- operationTime,
- "Expected 'readTimestamp' to be at least " + operationTime + " but got " +
- transactionDocument.readTimestamp + " instead: " +
- tojson(transactionDocument));
- assert.gte(ISODate(transactionDocument.startWallClockTime),
- timeBeforeTransactionStarts,
- "Expected 'startWallClockTime' to be at least" + timeBeforeTransactionStarts +
- " but got " + transactionDocument.startWallClockTime + " instead: " +
- tojson(transactionDocument));
- const expectedTimeOpen = (timeBeforeCurrentOp - timeAfterTransactionStarts) * 1000;
- assert.gt(transactionDocument.timeOpenMicros,
- expectedTimeOpen,
- "Expected 'timeOpenMicros' to be at least" + expectedTimeOpen + " but got " +
- transactionDocument.timeOpenMicros + " instead: " +
- tojson(transactionDocument));
- assert.gte(transactionDocument.timeActiveMicros,
- 0,
- "Expected 'timeActiveMicros' to be at least 0: " + tojson(transactionDocument));
+function transactionFn(isPrepared) {
+ const collName = 'currentop_active_transaction';
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase('test');
+
+ session.startTransaction({readConcern: {level: 'snapshot'}});
+ sessionDB[collName].update({}, {x: 2});
+ if (isPrepared) {
+ // Load the prepare helpers to be called in the parallel shell.
+ load('jstests/core/txns/libs/prepare_helpers.js');
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+ } else {
+ assert.commandWorked(session.commitTransaction_forTesting());
+ }
+}
+
+function checkCurrentOpFields(currentOp,
+ isPrepared,
+ operationTime,
+ timeBeforeTransactionStarts,
+ timeAfterTransactionStarts,
+ timeBeforeCurrentOp) {
+ const transactionDocument = currentOp[0].transaction;
+ assert.eq(transactionDocument.parameters.autocommit,
+ false,
+ "Expected 'autocommit' to be false but got " +
+ transactionDocument.parameters.autocommit +
+ " instead: " + tojson(transactionDocument));
+ assert.docEq(transactionDocument.parameters.readConcern,
+ {level: 'snapshot'},
+ "Expected 'readConcern' to be level: snapshot but got " +
+ tojson(transactionDocument.parameters.readConcern) +
+ " instead: " + tojson(transactionDocument));
+ assert.gte(transactionDocument.readTimestamp,
+ operationTime,
+ "Expected 'readTimestamp' to be at least " + operationTime + " but got " +
+ transactionDocument.readTimestamp + " instead: " + tojson(transactionDocument));
+ assert.gte(ISODate(transactionDocument.startWallClockTime),
+ timeBeforeTransactionStarts,
+               "Expected 'startWallClockTime' to be at least " + timeBeforeTransactionStarts +
+                   " but got " + transactionDocument.startWallClockTime +
+ " instead: " + tojson(transactionDocument));
+ const expectedTimeOpen = (timeBeforeCurrentOp - timeAfterTransactionStarts) * 1000;
+ assert.gt(transactionDocument.timeOpenMicros,
+ expectedTimeOpen,
+              "Expected 'timeOpenMicros' to be at least " + expectedTimeOpen + " but got " +
+ transactionDocument.timeOpenMicros + " instead: " + tojson(transactionDocument));
+ assert.gte(transactionDocument.timeActiveMicros,
+ 0,
+ "Expected 'timeActiveMicros' to be at least 0: " + tojson(transactionDocument));
+ assert.gte(transactionDocument.timeInactiveMicros,
+ 0,
+ "Expected 'timeInactiveMicros' to be at least 0: " + tojson(transactionDocument));
+ const actualExpiryTime = ISODate(transactionDocument.expiryTime).getTime();
+ const expectedExpiryTime =
+ ISODate(transactionDocument.startWallClockTime).getTime() + transactionLifeTime * 1000;
+ assert.eq(expectedExpiryTime,
+ actualExpiryTime,
+ "Expected 'expiryTime' to be " + expectedExpiryTime + " but got " + actualExpiryTime +
+ " instead: " + tojson(transactionDocument));
+ if (isPrepared) {
assert.gte(
- transactionDocument.timeInactiveMicros,
+ transactionDocument.timePreparedMicros,
0,
- "Expected 'timeInactiveMicros' to be at least 0: " + tojson(transactionDocument));
- const actualExpiryTime = ISODate(transactionDocument.expiryTime).getTime();
- const expectedExpiryTime =
- ISODate(transactionDocument.startWallClockTime).getTime() + transactionLifeTime * 1000;
- assert.eq(expectedExpiryTime,
- actualExpiryTime,
- "Expected 'expiryTime' to be " + expectedExpiryTime + " but got " +
- actualExpiryTime + " instead: " + tojson(transactionDocument));
- if (isPrepared) {
- assert.gte(
- transactionDocument.timePreparedMicros,
- 0,
- "Expected 'timePreparedMicros' to be at least 0: " + tojson(transactionDocument));
- }
+ "Expected 'timePreparedMicros' to be at least 0: " + tojson(transactionDocument));
}
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const collName = 'currentop_active_transaction';
- const testDB = rst.getPrimary().getDB('test');
- const adminDB = rst.getPrimary().getDB('admin');
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
-
- // Run an operation prior to starting the transaction and save its operation time. We will use
- // this later to assert that our subsequent transaction's readTimestamp is greater than or equal
- // to this operation time.
- let res = assert.commandWorked(testDB.runCommand({insert: collName, documents: [{x: 1}]}));
-
- // Set and save the transaction's lifetime. We will use this later to assert that our
- // transaction's expiry time is equal to its start time + lifetime.
- const transactionLifeTime = 10;
- assert.commandWorked(testDB.adminCommand(
- {setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime}));
-
- // This will make the transaction hang.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterSettingPrepareStartTime', mode: 'alwaysOn'}));
-
- let timeBeforeTransactionStarts = new ISODate();
- let isPrepared = true;
- const joinPreparedTransaction =
- startParallelShell(funWithArgs(transactionFn, isPrepared), rst.ports[0]);
-
- const prepareTransactionFilter = {
- active: true,
- 'lsid': {$exists: true},
- 'transaction.parameters.txnNumber': {$eq: 0},
- 'transaction.parameters.autocommit': {$eq: false},
- 'transaction.timePreparedMicros': {$exists: true}
- };
-
- // Keep running currentOp() until we see the transaction subdocument.
- assert.soon(function() {
- return 1 ===
- adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).itcount();
- });
-
- let timeAfterTransactionStarts = new ISODate();
- // Sleep here to allow some time between timeAfterTransactionStarts and timeBeforeCurrentOp to
- // elapse.
- sleep(100);
- let timeBeforeCurrentOp = new ISODate();
- // Check that the currentOp's transaction subdocument's fields align with our expectations.
- let currentOp =
- adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).toArray();
- checkCurrentOpFields(currentOp,
- isPrepared,
- res.operationTime,
- timeBeforeTransactionStarts,
- timeAfterTransactionStarts,
- timeBeforeCurrentOp);
-
- // Now the transaction can proceed.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAfterSettingPrepareStartTime', mode: 'off'}));
- joinPreparedTransaction();
-
- // Conduct the same test but with a non-prepared transaction.
- res = assert.commandWorked(testDB.runCommand({insert: collName, documents: [{x: 1}]}));
-
- // This will make the transaction hang.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'alwaysOn'}));
-
- timeBeforeTransactionStarts = new ISODate();
- isPrepared = false;
- const joinTransaction =
- startParallelShell(funWithArgs(transactionFn, isPrepared), rst.ports[0]);
-
- const transactionFilter = {
- active: true,
- 'lsid': {$exists: true},
- 'transaction.parameters.txnNumber': {$eq: 0},
- 'transaction.parameters.autocommit': {$eq: false},
- 'transaction.timePreparedMicros': {$exists: false}
- };
-
- // Keep running currentOp() until we see the transaction subdocument.
- assert.soon(function() {
- return 1 === adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).itcount();
- });
-
- timeAfterTransactionStarts = new ISODate();
- // Sleep here to allow some time between timeAfterTransactionStarts and timeBeforeCurrentOp to
- // elapse.
- sleep(100);
- timeBeforeCurrentOp = new ISODate();
- // Check that the currentOp's transaction subdocument's fields align with our expectations.
- currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).toArray();
- checkCurrentOpFields(currentOp,
- isPrepared,
- res.operationTime,
- timeBeforeTransactionStarts,
- timeAfterTransactionStarts,
- timeBeforeCurrentOp);
-
- // Now the transaction can proceed.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'off'}));
- joinTransaction();
-
- rst.stopSet();
+}
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const collName = 'currentop_active_transaction';
+const testDB = rst.getPrimary().getDB('test');
+const adminDB = rst.getPrimary().getDB('admin');
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
+
+// Run an operation prior to starting the transaction and save its operation time. We will use
+// this later to assert that our subsequent transaction's readTimestamp is greater than or equal
+// to this operation time.
+let res = assert.commandWorked(testDB.runCommand({insert: collName, documents: [{x: 1}]}));
+
+// Set and save the transaction's lifetime. We will use this later to assert that our
+// transaction's expiry time is equal to its start time + lifetime.
+const transactionLifeTime = 10;
+assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, transactionLifetimeLimitSeconds: transactionLifeTime}));
+
+// This will make the transaction hang.
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'hangAfterSettingPrepareStartTime', mode: 'alwaysOn'}));
+
+let timeBeforeTransactionStarts = new ISODate();
+let isPrepared = true;
+const joinPreparedTransaction =
+ startParallelShell(funWithArgs(transactionFn, isPrepared), rst.ports[0]);
+
+const prepareTransactionFilter = {
+ active: true,
+ 'lsid': {$exists: true},
+ 'transaction.parameters.txnNumber': {$eq: 0},
+ 'transaction.parameters.autocommit': {$eq: false},
+ 'transaction.timePreparedMicros': {$exists: true}
+};
+
+// Keep running currentOp() until we see the transaction subdocument.
+assert.soon(function() {
+ return 1 ===
+ adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).itcount();
+});
+
+let timeAfterTransactionStarts = new ISODate();
+// Sleep here to allow some time between timeAfterTransactionStarts and timeBeforeCurrentOp to
+// elapse.
+sleep(100);
+let timeBeforeCurrentOp = new ISODate();
+// Check that the currentOp's transaction subdocument's fields align with our expectations.
+let currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).toArray();
+checkCurrentOpFields(currentOp,
+ isPrepared,
+ res.operationTime,
+ timeBeforeTransactionStarts,
+ timeAfterTransactionStarts,
+ timeBeforeCurrentOp);
+
+// Now the transaction can proceed.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingPrepareStartTime', mode: 'off'}));
+joinPreparedTransaction();
+
+// Conduct the same test but with a non-prepared transaction.
+res = assert.commandWorked(testDB.runCommand({insert: collName, documents: [{x: 1}]}));
+
+// This will make the transaction hang.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'alwaysOn'}));
+
+timeBeforeTransactionStarts = new ISODate();
+isPrepared = false;
+const joinTransaction = startParallelShell(funWithArgs(transactionFn, isPrepared), rst.ports[0]);
+
+const transactionFilter = {
+ active: true,
+ 'lsid': {$exists: true},
+ 'transaction.parameters.txnNumber': {$eq: 0},
+ 'transaction.parameters.autocommit': {$eq: false},
+ 'transaction.timePreparedMicros': {$exists: false}
+};
+
+// Keep running currentOp() until we see the transaction subdocument.
+assert.soon(function() {
+ return 1 === adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).itcount();
+});
+
+timeAfterTransactionStarts = new ISODate();
+// Sleep here to allow some time between timeAfterTransactionStarts and timeBeforeCurrentOp to
+// elapse.
+sleep(100);
+timeBeforeCurrentOp = new ISODate();
+// Check that the currentOp's transaction subdocument's fields align with our expectations.
+currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).toArray();
+checkCurrentOpFields(currentOp,
+ isPrepared,
+ res.operationTime,
+ timeBeforeTransactionStarts,
+ timeAfterTransactionStarts,
+ timeBeforeCurrentOp);
+
+// Now the transaction can proceed.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'off'}));
+joinTransaction();
+
+rst.stopSet();
})();
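
The currentOp filters above can be reused outside the test; a hedged sketch for inspecting the 'transaction' subdocument of active transactions (the 'adminDB' handle is an assumption) might look like:

    // Sketch only: 'adminDB' is assumed to be the admin database of the primary.
    const txnOps = adminDB
                       .aggregate([
                           {$currentOp: {}},
                           {$match: {active: true, "transaction.parameters.autocommit": false}}
                       ])
                       .toArray();
    txnOps.forEach((op) => printjson(op.transaction));
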
diff --git a/jstests/noPassthrough/currentop_inactive_transaction_includes_last_client_info.js b/jstests/noPassthrough/currentop_inactive_transaction_includes_last_client_info.js
index e02beac5366..25a5857d851 100644
--- a/jstests/noPassthrough/currentop_inactive_transaction_includes_last_client_info.js
+++ b/jstests/noPassthrough/currentop_inactive_transaction_includes_last_client_info.js
@@ -6,62 +6,66 @@
*/
(function() {
- 'use strict';
+'use strict';
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const collName = 'currentop_last_client_info';
- const dbName = 'test';
- const testDB = rst.getPrimary().getDB(dbName);
- const adminDB = rst.getPrimary().getDB('admin');
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
+const collName = 'currentop_last_client_info';
+const dbName = 'test';
+const testDB = rst.getPrimary().getDB(dbName);
+const adminDB = rst.getPrimary().getDB('admin');
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
- // Start a new Session.
- const lsid = assert.commandWorked(testDB.runCommand({startSession: 1})).id;
- const txnNumber = NumberLong(0);
- assert.commandWorked(testDB.runCommand({
- find: collName,
- lsid: lsid,
- txnNumber: txnNumber,
- readConcern: {level: "snapshot"},
- startTransaction: true,
- autocommit: false
- }));
+// Start a new Session.
+const lsid = assert.commandWorked(testDB.runCommand({startSession: 1})).id;
+const txnNumber = NumberLong(0);
+assert.commandWorked(testDB.runCommand({
+ find: collName,
+ lsid: lsid,
+ txnNumber: txnNumber,
+ readConcern: {level: "snapshot"},
+ startTransaction: true,
+ autocommit: false
+}));
- const currentOpFilter = {active: false, 'lsid.id': {$eq: lsid.id}, 'client': {$exists: true}};
+const currentOpFilter = {
+ active: false,
+ 'lsid.id': {$eq: lsid.id},
+ 'client': {$exists: true}
+};
- let currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: currentOpFilter}]).toArray();
- assert.eq(currentOp.length, 1);
+let currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: currentOpFilter}]).toArray();
+assert.eq(currentOp.length, 1);
- let currentOpEntry = currentOp[0];
- const connectionId = currentOpEntry.connectionId;
- // Check that the currentOp object contains information about the last client that has run an
- // operation and that its values align with our expectations.
- assert.eq(currentOpEntry.appName, "MongoDB Shell");
- assert.eq(currentOpEntry.clientMetadata.application.name, "MongoDB Shell");
- assert.eq(currentOpEntry.clientMetadata.driver.name, "MongoDB Internal Client");
+let currentOpEntry = currentOp[0];
+const connectionId = currentOpEntry.connectionId;
+// Check that the currentOp object contains information about the last client that has run an
+// operation and that its values align with our expectations.
+assert.eq(currentOpEntry.appName, "MongoDB Shell");
+assert.eq(currentOpEntry.clientMetadata.application.name, "MongoDB Shell");
+assert.eq(currentOpEntry.clientMetadata.driver.name, "MongoDB Internal Client");
- // Create a new Client and run another operation on the same session.
- const otherClient = new Mongo(rst.getPrimary().host);
- assert.commandWorked(otherClient.getDB(dbName).runCommand(
- {find: collName, lsid: lsid, txnNumber: txnNumber, autocommit: false}));
+// Create a new Client and run another operation on the same session.
+const otherClient = new Mongo(rst.getPrimary().host);
+assert.commandWorked(otherClient.getDB(dbName).runCommand(
+ {find: collName, lsid: lsid, txnNumber: txnNumber, autocommit: false}));
- currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: currentOpFilter}]).toArray();
- currentOpEntry = currentOp[0];
- // Check that the last client that has ran an operation against this session has a different
- // connectionId than the previous client.
- assert.neq(currentOpEntry.connectionId, connectionId);
+currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: currentOpFilter}]).toArray();
+currentOpEntry = currentOp[0];
+// Check that the last client that has run an operation against this session has a different
+// connectionId than the previous client.
+assert.neq(currentOpEntry.connectionId, connectionId);
- assert.commandWorked(testDB.adminCommand({
- commitTransaction: 1,
- lsid: lsid,
- txnNumber: txnNumber,
- autocommit: false,
- writeConcern: {w: 'majority'}
- }));
+assert.commandWorked(testDB.adminCommand({
+ commitTransaction: 1,
+ lsid: lsid,
+ txnNumber: txnNumber,
+ autocommit: false,
+ writeConcern: {w: 'majority'}
+}));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/currentop_includes_await_time.js b/jstests/noPassthrough/currentop_includes_await_time.js
index 3a5ad0bca4f..5a5dee2f5ce 100644
--- a/jstests/noPassthrough/currentop_includes_await_time.js
+++ b/jstests/noPassthrough/currentop_includes_await_time.js
@@ -4,50 +4,50 @@
* @tags: [requires_capped]
*/
(function() {
- "use test";
-
- // This test runs a getMore in a parallel shell, which will not inherit the implicit session of
- // the cursor establishing command.
- TestData.disableImplicitSessions = true;
-
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("test");
- const coll = testDB.currentop_includes_await_time;
-
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName(), {capped: true, size: 1024}));
- assert.writeOK(coll.insert({_id: 1}));
-
- let cmdRes = assert.commandWorked(
- testDB.runCommand({find: coll.getName(), tailable: true, awaitData: true}));
-
- TestData.commandResult = cmdRes;
- let cleanupShell = startParallelShell(function() {
- db.getSiblingDB("test").runCommand({
- getMore: TestData.commandResult.cursor.id,
- collection: "currentop_includes_await_time",
- maxTimeMS: 5 * 60 * 1000,
- });
- }, conn.port);
-
- assert.soon(function() {
- // This filter ensures that the getMore 'secs_running' and 'microsecs_running' fields are
- // sufficiently large that they appear to include time spent blocking waiting for capped
- // inserts.
- let ops = testDB.currentOp({
- "command.getMore": {$exists: true},
- "ns": coll.getFullName(),
- secs_running: {$gte: 2},
- microsecs_running: {$gte: 2 * 1000 * 1000}
- });
- return ops.inprog.length === 1;
- }, printjson(testDB.currentOp()));
-
- // A capped insertion should unblock the getMore, allowing the test to complete before the
- // getMore's awaitData time expires.
- assert.writeOK(coll.insert({_id: 2}));
-
- cleanupShell();
- MongoRunner.stopMongod(conn);
+"use test";
+
+// This test runs a getMore in a parallel shell, which will not inherit the implicit session of
+// the cursor establishing command.
+TestData.disableImplicitSessions = true;
+
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+const testDB = conn.getDB("test");
+const coll = testDB.currentop_includes_await_time;
+
+coll.drop();
+assert.commandWorked(testDB.createCollection(coll.getName(), {capped: true, size: 1024}));
+assert.writeOK(coll.insert({_id: 1}));
+
+let cmdRes = assert.commandWorked(
+ testDB.runCommand({find: coll.getName(), tailable: true, awaitData: true}));
+
+TestData.commandResult = cmdRes;
+let cleanupShell = startParallelShell(function() {
+ db.getSiblingDB("test").runCommand({
+ getMore: TestData.commandResult.cursor.id,
+ collection: "currentop_includes_await_time",
+ maxTimeMS: 5 * 60 * 1000,
+ });
+}, conn.port);
+
+assert.soon(function() {
+ // This filter ensures that the getMore 'secs_running' and 'microsecs_running' fields are
+ // sufficiently large that they appear to include time spent blocking waiting for capped
+ // inserts.
+ let ops = testDB.currentOp({
+ "command.getMore": {$exists: true},
+ "ns": coll.getFullName(),
+ secs_running: {$gte: 2},
+ microsecs_running: {$gte: 2 * 1000 * 1000}
+ });
+ return ops.inprog.length === 1;
+}, printjson(testDB.currentOp()));
+
+// A capped insertion should unblock the getMore, allowing the test to complete before the
+// getMore's awaitData time expires.
+assert.writeOK(coll.insert({_id: 2}));
+
+cleanupShell();
+MongoRunner.stopMongod(conn);
}());
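
The currentOp() filter used above generalizes to spotting getMores that have been blocked waiting for capped inserts; a small sketch (the 'testDB' handle and the namespace string are assumptions) would be:

    // Sketch only: 'testDB' is assumed to be a DB handle; adjust the 'ns' value to the
    // capped collection namespace of interest.
    const ops = testDB.currentOp({
        "command.getMore": {$exists: true},
        ns: "test.currentop_includes_await_time",
        secs_running: {$gte: 2}
    });
    printjson(ops.inprog);
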
diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js
index b2aa9b2284f..15e655d568a 100644
--- a/jstests/noPassthrough/currentop_query.js
+++ b/jstests/noPassthrough/currentop_query.js
@@ -4,648 +4,632 @@
* @tags: [requires_replication, requires_sharding]
*/
(function() {
- "use strict";
-
- // This test runs manual getMores using different connections, which will not inherit the
- // implicit session of the cursor establishing command.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- // Set up a 2-shard cluster. Configure 'internalQueryExecYieldIterations' on both shards such
- // that operations will yield on each PlanExecuter iteration.
- const st = new ShardingTest({
- name: jsTestName(),
- shards: 2,
- rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
- });
-
- // Obtain one mongoS connection and a second direct to the shard.
- const rsConn = st.rs0.getPrimary();
- const mongosConn = st.s;
-
- const mongosDB = mongosConn.getDB("currentop_query");
- const mongosColl = mongosDB.currentop_query;
-
- // Enable sharding on the the test database and ensure that the primary is on shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), rsConn.name);
-
- // On a sharded cluster, aggregations which are dispatched to multiple shards first establish
- // zero-batch cursors and only hit the failpoints on the following getMore. This helper takes a
- // generic command object and creates an appropriate filter given the use-case.
- function commandOrOriginatingCommand(cmdObj, isRemoteShardCurOp) {
- const cmdFieldName = (isRemoteShardCurOp ? "cursor.originatingCommand" : "command");
- const cmdFilter = {};
- for (let subFieldName in cmdObj) {
- cmdFilter[`${cmdFieldName}.${subFieldName}`] = cmdObj[subFieldName];
- }
- return cmdFilter;
+"use strict";
+
+// This test runs manual getMores using different connections, which will not inherit the
+// implicit session of the cursor establishing command.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+// Set up a 2-shard cluster. Configure 'internalQueryExecYieldIterations' on both shards such
+// that operations will yield on each PlanExecutor iteration.
+const st = new ShardingTest({
+ name: jsTestName(),
+ shards: 2,
+ rs: {nodes: 1, setParameter: {internalQueryExecYieldIterations: 1}}
+});
+
+// Obtain one mongoS connection and a second direct to the shard.
+const rsConn = st.rs0.getPrimary();
+const mongosConn = st.s;
+
+const mongosDB = mongosConn.getDB("currentop_query");
+const mongosColl = mongosDB.currentop_query;
+
+// Enable sharding on the test database and ensure that the primary is on shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), rsConn.name);
+
+// On a sharded cluster, aggregations which are dispatched to multiple shards first establish
+// zero-batch cursors and only hit the failpoints on the following getMore. This helper takes a
+// generic command object and creates an appropriate filter given the use-case.
+function commandOrOriginatingCommand(cmdObj, isRemoteShardCurOp) {
+ const cmdFieldName = (isRemoteShardCurOp ? "cursor.originatingCommand" : "command");
+ const cmdFilter = {};
+ for (let subFieldName in cmdObj) {
+ cmdFilter[`${cmdFieldName}.${subFieldName}`] = cmdObj[subFieldName];
+ }
+ return cmdFilter;
+}
+
+// Drops and re-creates the sharded test collection.
+function dropAndRecreateTestCollection() {
+ assert(mongosColl.drop());
+ assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: "hashed"}}));
+}
+
+/**
+ * @param {connection} conn - The connection through which to run the test suite.
+ * @param {string} readMode - The read mode to use for the parallel shell. This allows
+ * testing currentOp() output for both OP_QUERY and OP_GET_MORE queries, as well as "find" and
+ * "getMore" commands.
+ * @param {function} currentOp - Function which takes a database object and a filter, and
+ * returns an array of matching current operations. This allows us to test output for both the
+ * currentOp command and the $currentOp aggregation stage.
+ * @param {boolean} truncatedOps - If true, we expect operations that exceed the maximum
+ * currentOp size to be truncated in the output 'command' field, and we run only a subset of
+ * tests designed to exercise that scenario. If false, we expect the entire operation to be
+ * returned.
+ * @param {boolean} localOps - If true, we expect currentOp to return operations running on a
+ * mongoS itself rather than on the shards.
+ */
+function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
+ const testDB = conn.getDB("currentop_query");
+ const coll = testDB.currentop_query;
+ dropAndRecreateTestCollection();
+
+ for (let i = 0; i < 5; ++i) {
+ assert.writeOK(coll.insert({_id: i, a: i}));
}
- // Drops and re-creates the sharded test collection.
- function dropAndRecreateTestCollection() {
- assert(mongosColl.drop());
- assert.commandWorked(mongosDB.adminCommand(
- {shardCollection: mongosColl.getFullName(), key: {_id: "hashed"}}));
+ const isLocalMongosCurOp = (FixtureHelpers.isMongos(testDB) && localOps);
+ const isRemoteShardCurOp = (FixtureHelpers.isMongos(testDB) && !localOps);
+
+ // If 'truncatedOps' is true, run only the subset of tests designed to validate the
+ // truncation behaviour. Otherwise, run the standard set of tests which assume that
+ // truncation will not occur.
+ if (truncatedOps) {
+ runTruncationTests();
+ } else {
+ runStandardTests();
}
/**
- * @param {connection} conn - The connection through which to run the test suite.
- * @param {string} readMode - The read mode to use for the parallel shell. This allows
- * testing currentOp() output for both OP_QUERY and OP_GET_MORE queries, as well as "find" and
- * "getMore" commands.
- * @params {function} currentOp - Function which takes a database object and a filter, and
- * returns an array of matching current operations. This allows us to test output for both the
- * currentOp command and the $currentOp aggregation stage.
- * @params {boolean} truncatedOps - if true, we expect operations that exceed the maximum
- * currentOp size to be truncated in the output 'command' field, and we run only a subset of
- * tests designed to exercise that scenario. If false, we expect the entire operation to be
- * returned.
- * @params {boolean} localOps - if true, we expect currentOp to return operations running on a
- * mongoS itself rather than on the shards.
+ * Captures currentOp() for a given test command/operation and confirms that namespace,
+ * operation type and planSummary are correct.
+ *
+ * @param {Object} testObj - Contains test arguments.
+ * @param {function} testObj.test - A function that runs the desired test op/cmd.
+ * @param {string} testObj.planSummary - A string containing the expected planSummary.
+ * @param {Object} testObj.currentOpFilter - A filter to be used to narrow currentOp()
+ * output to only the relevant operation or command.
+ * @param {string} [testObj.command] - The command to test against. Will look for this to
+ * be a key in the currentOp().query object.
+ * @param {string} [testObj.operation] - The operation to test against. Will look for this
+ * to be the value of the currentOp().op field.
*/
- function runTests({conn, readMode, currentOp, truncatedOps, localOps}) {
- const testDB = conn.getDB("currentop_query");
- const coll = testDB.currentop_query;
- dropAndRecreateTestCollection();
-
- for (let i = 0; i < 5; ++i) {
- assert.writeOK(coll.insert({_id: i, a: i}));
+ function confirmCurrentOpContents(testObj) {
+ // Force queries to hang on yield to allow for currentOp capture.
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: conn.getDB("admin"),
+ cmdObj: {
+ configureFailPoint: "setYieldAllLocksHang",
+ mode: "alwaysOn",
+ data: {namespace: mongosColl.getFullName()}
+ }
+ });
+
+ // Set the test configuration in TestData for the parallel shell test.
+ TestData.shellReadMode = readMode;
+ TestData.currentOpTest = testObj.test;
+ TestData.currentOpCollName = "currentop_query";
+
+ // Wrapper function which sets the readMode and DB before running the test function
+ // found at TestData.currentOpTest.
+ function doTest() {
+ const testDB = db.getSiblingDB(TestData.currentOpCollName);
+ testDB.getMongo().forceReadMode(TestData.shellReadMode);
+ TestData.currentOpTest(testDB);
}
- const isLocalMongosCurOp = (FixtureHelpers.isMongos(testDB) && localOps);
- const isRemoteShardCurOp = (FixtureHelpers.isMongos(testDB) && !localOps);
+ // Run the operation in the background.
+ var awaitShell = startParallelShell(doTest, testDB.getMongo().port);
- // If 'truncatedOps' is true, run only the subset of tests designed to validate the
- // truncation behaviour. Otherwise, run the standard set of tests which assume that
- // truncation will not occur.
- if (truncatedOps) {
- runTruncationTests();
- } else {
- runStandardTests();
+ // Augment the currentOpFilter with additional known predicates.
+ if (!testObj.currentOpFilter.ns) {
+ testObj.currentOpFilter.ns = coll.getFullName();
+ }
+ if (!isLocalMongosCurOp) {
+ testObj.currentOpFilter.planSummary = testObj.planSummary;
+ }
+ if (testObj.hasOwnProperty("command")) {
+ testObj.currentOpFilter["command." + testObj.command] = {$exists: true};
+ } else if (testObj.hasOwnProperty("operation")) {
+ testObj.currentOpFilter.op = testObj.operation;
}
- /**
- * Captures currentOp() for a given test command/operation and confirms that namespace,
- * operation type and planSummary are correct.
- *
- * @param {Object} testObj - Contains test arguments.
- * @param {function} testObj.test - A function that runs the desired test op/cmd.
- * @param {string} testObj.planSummary - A string containing the expected planSummary.
- * @param {Object} testObj.currentOpFilter - A filter to be used to narrow currentOp()
- * output to only the relevant operation or command.
- * @param {string} [testObj.command] - The command to test against. Will look for this to
- * be a key in the currentOp().query object.
- * @param {string} [testObj.operation] - The operation to test against. Will look for this
- * to be the value of the currentOp().op field.
- */
- function confirmCurrentOpContents(testObj) {
- // Force queries to hang on yield to allow for currentOp capture.
- FixtureHelpers.runCommandOnEachPrimary({
- db: conn.getDB("admin"),
- cmdObj: {
- configureFailPoint: "setYieldAllLocksHang",
- mode: "alwaysOn",
- data: {namespace: mongosColl.getFullName()}
+ // Capture currentOp record for the query and confirm that the 'query' and 'planSummary'
+ // fields contain the content expected. We are indirectly testing the 'ns' field as well
+ // with the currentOp query argument.
+ assert.soon(
+ function() {
+ var result = currentOp(testDB, testObj.currentOpFilter, truncatedOps, localOps);
+ assert.commandWorked(result);
+
+ if (result.inprog.length > 0) {
+ result.inprog.forEach((op) => {
+ assert.eq(op.appName, "MongoDB Shell", tojson(result));
+ assert.eq(
+ op.clientMetadata.application.name, "MongoDB Shell", tojson(result));
+ });
+ return true;
}
- });
-
- // Set the test configuration in TestData for the parallel shell test.
- TestData.shellReadMode = readMode;
- TestData.currentOpTest = testObj.test;
- TestData.currentOpCollName = "currentop_query";
-
- // Wrapper function which sets the readMode and DB before running the test function
- // found at TestData.currentOpTest.
- function doTest() {
- const testDB = db.getSiblingDB(TestData.currentOpCollName);
- testDB.getMongo().forceReadMode(TestData.shellReadMode);
- TestData.currentOpTest(testDB);
- }
-
- // Run the operation in the background.
- var awaitShell = startParallelShell(doTest, testDB.getMongo().port);
- // Augment the currentOpFilter with additional known predicates.
- if (!testObj.currentOpFilter.ns) {
- testObj.currentOpFilter.ns = coll.getFullName();
- }
- if (!isLocalMongosCurOp) {
- testObj.currentOpFilter.planSummary = testObj.planSummary;
- }
- if (testObj.hasOwnProperty("command")) {
- testObj.currentOpFilter["command." + testObj.command] = {$exists: true};
- } else if (testObj.hasOwnProperty("operation")) {
- testObj.currentOpFilter.op = testObj.operation;
- }
-
- // Capture currentOp record for the query and confirm that the 'query' and 'planSummary'
- // fields contain the content expected. We are indirectly testing the 'ns' field as well
- // with the currentOp query argument.
- assert.soon(
- function() {
- var result = currentOp(testDB, testObj.currentOpFilter, truncatedOps, localOps);
- assert.commandWorked(result);
-
- if (result.inprog.length > 0) {
- result.inprog.forEach((op) => {
- assert.eq(op.appName, "MongoDB Shell", tojson(result));
- assert.eq(op.clientMetadata.application.name,
- "MongoDB Shell",
- tojson(result));
- });
- return true;
- }
-
- return false;
- },
- function() {
- return "Failed to find operation from " + tojson(testObj.currentOpFilter) +
- " in currentOp() output: " +
- tojson(currentOp(testDB, {}, truncatedOps, localOps)) +
- (isLocalMongosCurOp
- ? ", with localOps=false: " +
- tojson(currentOp(testDB, {}, truncatedOps, false))
- : "");
- });
-
- // Allow the query to complete.
- FixtureHelpers.runCommandOnEachPrimary({
- db: conn.getDB("admin"),
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
+ return false;
+ },
+ function() {
+ return "Failed to find operation from " + tojson(testObj.currentOpFilter) +
+ " in currentOp() output: " +
+ tojson(currentOp(testDB, {}, truncatedOps, localOps)) +
+ (isLocalMongosCurOp ? ", with localOps=false: " +
+ tojson(currentOp(testDB, {}, truncatedOps, false))
+ : "");
});
- awaitShell();
- delete TestData.currentOpCollName;
- delete TestData.currentOpTest;
- delete TestData.shellReadMode;
- }
+ // Allow the query to complete.
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: conn.getDB("admin"),
+ cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
+ });
- /**
- * Runs a set of tests to verify that the currentOp output appears as expected. These tests
- * assume that the 'truncateOps' parameter is false, so no command objects in the currentOp
- * output will be truncated to string.
- */
- function runStandardTests() {
- //
- // Confirm currentOp content for commands defined in 'testList'.
- //
- var testList = [
- {
- test: function(db) {
- assert.eq(db.currentop_query
- .aggregate([{$match: {a: 1, $comment: "currentop_query"}}], {
- collation: {locale: "fr"},
- hint: {_id: 1},
- comment: "currentop_query_2"
- })
- .itcount(),
- 1);
- },
- planSummary: "IXSCAN { _id: 1 }",
- currentOpFilter: commandOrOriginatingCommand({
- "aggregate": {$exists: true},
- "pipeline.0.$match.$comment": "currentop_query",
- "comment": "currentop_query_2",
- "collation": {locale: "fr"},
- "hint": {_id: 1}
- },
- isRemoteShardCurOp)
- },
- {
- test: function(db) {
- assert.eq(db.currentop_query.find({a: 1, $comment: "currentop_query"})
- .collation({locale: "fr"})
- .count(),
- 1);
- },
- command: "count",
- planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.query.$comment": "currentop_query",
- "command.collation": {locale: "fr"}
- }
+ awaitShell();
+ delete TestData.currentOpCollName;
+ delete TestData.currentOpTest;
+ delete TestData.shellReadMode;
+ }
+
+ /**
+ * Runs a set of tests to verify that the currentOp output appears as expected. These tests
+ * assume that the 'truncateOps' parameter is false, so no command objects in the currentOp
+ * output will be truncated to string.
+ */
+ function runStandardTests() {
+ //
+ // Confirm currentOp content for commands defined in 'testList'.
+ //
+ var testList = [
+ {
+ test: function(db) {
+ assert.eq(db.currentop_query
+ .aggregate([{$match: {a: 1, $comment: "currentop_query"}}], {
+ collation: {locale: "fr"},
+ hint: {_id: 1},
+ comment: "currentop_query_2"
+ })
+ .itcount(),
+ 1);
},
- {
- test: function(db) {
- assert.eq(db.currentop_query.distinct("a",
- {a: 1, $comment: "currentop_query"},
- {collation: {locale: "fr"}}),
- [1]);
- },
- command: "distinct",
- planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.query.$comment": "currentop_query",
- "command.collation": {locale: "fr"}
- }
+ planSummary: "IXSCAN { _id: 1 }",
+ currentOpFilter: commandOrOriginatingCommand({
+ "aggregate": {$exists: true},
+ "pipeline.0.$match.$comment": "currentop_query",
+ "comment": "currentop_query_2",
+ "collation": {locale: "fr"},
+ "hint": {_id: 1}
},
- {
- test: function(db) {
- assert.eq(
- db.currentop_query.find({a: 1}).comment("currentop_query").itcount(), 1);
- },
- command: "find",
- planSummary: "COLLSCAN",
- currentOpFilter: {"command.comment": "currentop_query"}
+ isRemoteShardCurOp)
+ },
+ {
+ test: function(db) {
+ assert.eq(db.currentop_query.find({a: 1, $comment: "currentop_query"})
+ .collation({locale: "fr"})
+ .count(),
+ 1);
},
- {
- test: function(db) {
- assert.eq(db.currentop_query.findAndModify({
- query: {_id: 1, a: 1, $comment: "currentop_query"},
- update: {$inc: {b: 1}},
- collation: {locale: "fr"}
- }),
- {"_id": 1, "a": 1});
- },
- command: "findandmodify",
- planSummary: "IXSCAN { _id: 1 }",
- currentOpFilter: {
- "command.query.$comment": "currentop_query",
- "command.collation": {locale: "fr"}
- }
+ command: "count",
+ planSummary: "COLLSCAN",
+ currentOpFilter: {
+ "command.query.$comment": "currentop_query",
+ "command.collation": {locale: "fr"}
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(
+ db.currentop_query.distinct(
+ "a", {a: 1, $comment: "currentop_query"}, {collation: {locale: "fr"}}),
+ [1]);
},
- {
- test: function(db) {
- assert.commandWorked(
- db.currentop_query.mapReduce(() => {},
- (a, b) => {},
- {
- query: {$comment: "currentop_query"},
- out: {inline: 1},
- }));
- },
- command: "mapreduce",
- planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.query.$comment": "currentop_query",
- "ns": /^currentop_query.*currentop_query/
- }
+ command: "distinct",
+ planSummary: "COLLSCAN",
+ currentOpFilter: {
+ "command.query.$comment": "currentop_query",
+ "command.collation": {locale: "fr"}
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(db.currentop_query.find({a: 1}).comment("currentop_query").itcount(),
+ 1);
},
- {
- test: function(db) {
- assert.writeOK(db.currentop_query.remove({a: 2, $comment: "currentop_query"},
- {collation: {locale: "fr"}}));
- },
- operation: "remove",
- planSummary: "COLLSCAN",
- currentOpFilter:
- (isLocalMongosCurOp
- ? {"command.delete": coll.getName(), "command.ordered": true}
- : {
- "command.q.$comment": "currentop_query",
- "command.collation": {locale: "fr"}
- })
+ command: "find",
+ planSummary: "COLLSCAN",
+ currentOpFilter: {"command.comment": "currentop_query"}
+ },
+ {
+ test: function(db) {
+ assert.eq(db.currentop_query.findAndModify({
+ query: {_id: 1, a: 1, $comment: "currentop_query"},
+ update: {$inc: {b: 1}},
+ collation: {locale: "fr"}
+ }),
+ {"_id": 1, "a": 1});
},
- {
- test: function(db) {
- assert.writeOK(
- db.currentop_query.update({a: 1, $comment: "currentop_query"},
- {$inc: {b: 1}},
- {collation: {locale: "fr"}, multi: true}));
- },
- operation: "update",
- planSummary: "COLLSCAN",
- currentOpFilter:
- (isLocalMongosCurOp
- ? {"command.update": coll.getName(), "command.ordered": true}
- : {
- "command.q.$comment": "currentop_query",
- "command.collation": {locale: "fr"}
- })
+ command: "findandmodify",
+ planSummary: "IXSCAN { _id: 1 }",
+ currentOpFilter: {
+ "command.query.$comment": "currentop_query",
+ "command.collation": {locale: "fr"}
}
- ];
-
- testList.forEach(confirmCurrentOpContents);
-
- //
- // Confirm currentOp contains collation for find command.
- //
- if (readMode === "commands") {
- confirmCurrentOpContents({
- test: function(db) {
- assert.eq(db.currentop_query.find({a: 1})
- .comment("currentop_query")
- .collation({locale: "fr"})
- .itcount(),
- 1);
- },
- command: "find",
- planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.comment": "currentop_query",
- "command.collation": {locale: "fr"}
- }
- });
- }
-
- //
- // Confirm currentOp content for the $geoNear aggregation stage.
- //
- dropAndRecreateTestCollection();
- for (let i = 0; i < 10; ++i) {
- assert.commandWorked(
- coll.insert({a: i, loc: {type: "Point", coordinates: [i, i]}}));
- }
- assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
- confirmCurrentOpContents({
+ },
+ {
test: function(db) {
- assert.commandWorked(db.runCommand({
- aggregate: "currentop_query",
- cursor: {},
- pipeline: [{
- $geoNear: {
- near: {type: "Point", coordinates: [1, 1]},
- distanceField: "dist",
- spherical: true,
- query: {$comment: "currentop_query"},
- }
- }],
- collation: {locale: "fr"},
- comment: "currentop_query",
+ assert.commandWorked(db.currentop_query.mapReduce(() => {}, (a, b) => {}, {
+ query: {$comment: "currentop_query"},
+ out: {inline: 1},
}));
},
- planSummary: "GEO_NEAR_2DSPHERE { loc: \"2dsphere\" }",
- currentOpFilter: commandOrOriginatingCommand({
- "aggregate": {$exists: true},
- "pipeline.0.$geoNear.query.$comment": "currentop_query",
- "collation": {locale: "fr"},
- "comment": "currentop_query",
- },
- isRemoteShardCurOp)
- });
-
- //
- // Confirm currentOp content for getMore. This case tests command and legacy getMore
- // with originating find and aggregate commands.
- //
- dropAndRecreateTestCollection();
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- const originatingCommands = {
- find:
- {find: "currentop_query", filter: {}, comment: "currentop_query", batchSize: 0},
- aggregate: {
- aggregate: "currentop_query",
- pipeline: [{$match: {}}],
- comment: "currentop_query",
- cursor: {batchSize: 0}
+ command: "mapreduce",
+ planSummary: "COLLSCAN",
+ currentOpFilter: {
+ "command.query.$comment": "currentop_query",
+ "ns": /^currentop_query.*currentop_query/
}
- };
-
- for (let cmdName in originatingCommands) {
- const cmdObj = originatingCommands[cmdName];
- const cmdRes = testDB.runCommand(cmdObj);
- assert.commandWorked(cmdRes);
-
- TestData.commandResult = cmdRes;
-
- // If this is a non-localOps test running via mongoS, then the cursorID we obtained
- // above is the ID of the mongoS cursor, and will not match the IDs of any of the
- // individual shard cursors in the currentOp output. We therefore don't perform an
- // exact match on 'command.getMore', but only verify that the cursor ID is non-zero.
- const filter = {
- "command.getMore":
- (isRemoteShardCurOp ? {$gt: 0} : TestData.commandResult.cursor.id),
- [`cursor.originatingCommand.${cmdName}`]:
- {$exists: true}, "cursor.originatingCommand.comment": "currentop_query"
- };
-
- confirmCurrentOpContents({
- test: function(db) {
- const cursor = new DBCommandCursor(db, TestData.commandResult, 5);
- assert.eq(cursor.itcount(), 10);
- },
- command: "getMore",
- planSummary: "COLLSCAN",
- currentOpFilter: filter
- });
-
- delete TestData.commandResult;
- }
-
- //
- // Confirm that currentOp displays upconverted getMore and originatingCommand in the
- // case of a legacy query.
- //
- if (readMode === "legacy") {
- let filter = {
- "command.getMore": {$gt: 0},
- "command.collection": "currentop_query",
- "command.batchSize": 2,
- "cursor.originatingCommand.find": "currentop_query",
- "cursor.originatingCommand.ntoreturn": 2,
- "cursor.originatingCommand.comment": "currentop_query"
- };
-
- confirmCurrentOpContents({
- test: function(db) {
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- // Temporarily disable hanging yields so that we can iterate the first
- // batch.
- FixtureHelpers.runCommandOnEachPrimary({
- db: db.getSiblingDB("admin"),
- cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
- });
-
- let cursor =
- db.currentop_query.find({}).comment("currentop_query").batchSize(2);
-
- // Exhaust the current batch so that the next request will force a getMore.
- while (cursor.objsLeftInBatch() > 0) {
- cursor.next();
- }
-
- // Set yields to hang so that we can check currentOp output.
- FixtureHelpers.runCommandOnEachPrimary({
- db: db.getSiblingDB("admin"),
- cmdObj: {
- configureFailPoint: "setYieldAllLocksHang",
- mode: "alwaysOn",
- data: {namespace: db.currentop_query.getFullName()}
- }
- });
-
- assert.eq(cursor.itcount(), 8);
- },
- operation: "getmore",
- planSummary: "COLLSCAN",
- currentOpFilter: filter
- });
+ },
+ {
+ test: function(db) {
+ assert.writeOK(db.currentop_query.remove({a: 2, $comment: "currentop_query"},
+ {collation: {locale: "fr"}}));
+ },
+ operation: "remove",
+ planSummary: "COLLSCAN",
+ currentOpFilter: (isLocalMongosCurOp
+ ? {"command.delete": coll.getName(), "command.ordered": true}
+ : {
+ "command.q.$comment": "currentop_query",
+ "command.collation": {locale: "fr"}
+ })
+ },
+ {
+ test: function(db) {
+ assert.writeOK(
+ db.currentop_query.update({a: 1, $comment: "currentop_query"},
+ {$inc: {b: 1}},
+ {collation: {locale: "fr"}, multi: true}));
+ },
+ operation: "update",
+ planSummary: "COLLSCAN",
+ currentOpFilter: (isLocalMongosCurOp
+ ? {"command.update": coll.getName(), "command.ordered": true}
+ : {
+ "command.q.$comment": "currentop_query",
+ "command.collation": {locale: "fr"}
+ })
}
+ ];
- //
- // Confirm that a legacy query whose filter contains a field named 'query' appears as
- // expected in currentOp. This test ensures that upconverting a legacy query correctly
- // identifies this as a user field rather than a wrapped filter spec.
- //
- if (readMode === "legacy") {
- confirmCurrentOpContents({
- test: function(db) {
- assert.eq(
- db.currentop_query.find({query: "foo", $comment: "currentop_query"})
- .itcount(),
- 0);
- },
- command: "find",
- planSummary: "COLLSCAN",
- currentOpFilter: {
- "command.filter.$comment": "currentop_query",
- "command.filter.query": "foo"
- }
- });
- }
- }
-
- /**
- * Runs a set of tests to verify that currentOp will serialize objects exceeding ~1000 bytes
- * to string when the 'truncateOps' parameter is set.
- */
- function runTruncationTests() {
- dropAndRecreateTestCollection();
- assert.writeOK(coll.insert({a: 1}));
-
- // When the currentOp command serializes the query object as a string, individual string
- // values inside it are truncated at 150 characters. To test "total length" truncation
- // we need to pass multiple values, each smaller than 150 bytes.
- TestData.queryFilter = {
- "1": "1".repeat(149),
- "2": "2".repeat(149),
- "3": "3".repeat(149),
- "4": "4".repeat(149),
- "5": "5".repeat(149),
- "6": "6".repeat(149),
- "7": "7".repeat(149),
- };
-
- var truncatedQueryString = "^\\{ find: \"currentop_query\", filter: \\{ " +
- "1: \"1{149}\", 2: \"2{149}\", 3: \"3{149}\", 4: \"4{149}\", 5: \"5{149}\", " +
- "6: \"6{149}\", 7: \"7+\\.\\.\\.";
-
- let currentOpFilter;
-
- currentOpFilter = {
- "command.$truncated": {$regex: truncatedQueryString},
- "command.comment": "currentop_query"
- };
+ testList.forEach(confirmCurrentOpContents);
+ //
+ // Confirm currentOp contains collation for find command.
+ //
+ if (readMode === "commands") {
confirmCurrentOpContents({
test: function(db) {
- assert.eq(db.currentop_query.find(TestData.queryFilter)
+ assert.eq(db.currentop_query.find({a: 1})
.comment("currentop_query")
+ .collation({locale: "fr"})
.itcount(),
- 0);
+ 1);
},
+ command: "find",
planSummary: "COLLSCAN",
- currentOpFilter: currentOpFilter
+ currentOpFilter:
+ {"command.comment": "currentop_query", "command.collation": {locale: "fr"}}
});
+ }
+
+ //
+ // Confirm currentOp content for the $geoNear aggregation stage.
+ //
+ dropAndRecreateTestCollection();
+ for (let i = 0; i < 10; ++i) {
+ assert.commandWorked(coll.insert({a: i, loc: {type: "Point", coordinates: [i, i]}}));
+ }
+ assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
+ confirmCurrentOpContents({
+ test: function(db) {
+ assert.commandWorked(db.runCommand({
+ aggregate: "currentop_query",
+ cursor: {},
+ pipeline: [{
+ $geoNear: {
+ near: {type: "Point", coordinates: [1, 1]},
+ distanceField: "dist",
+ spherical: true,
+ query: {$comment: "currentop_query"},
+ }
+ }],
+ collation: {locale: "fr"},
+ comment: "currentop_query",
+ }));
+ },
+ planSummary: "GEO_NEAR_2DSPHERE { loc: \"2dsphere\" }",
+ currentOpFilter: commandOrOriginatingCommand({
+ "aggregate": {$exists: true},
+ "pipeline.0.$geoNear.query.$comment": "currentop_query",
+ "collation": {locale: "fr"},
+ "comment": "currentop_query",
+ },
+ isRemoteShardCurOp)
+ });
+
+ //
+ // Confirm currentOp content for getMore. This case tests command and legacy getMore
+ // with originating find and aggregate commands.
+ //
+ dropAndRecreateTestCollection();
+ for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+ }
- // Verify that an originatingCommand truncated by currentOp appears as { $truncated:
- // <string>, comment: <string> }.
- const cmdRes = testDB.runCommand({
- find: "currentop_query",
- filter: TestData.queryFilter,
+ const originatingCommands = {
+ find: {find: "currentop_query", filter: {}, comment: "currentop_query", batchSize: 0},
+ aggregate: {
+ aggregate: "currentop_query",
+ pipeline: [{$match: {}}],
comment: "currentop_query",
- batchSize: 0
- });
+ cursor: {batchSize: 0}
+ }
+ };
+
+ for (let cmdName in originatingCommands) {
+ const cmdObj = originatingCommands[cmdName];
+ const cmdRes = testDB.runCommand(cmdObj);
assert.commandWorked(cmdRes);
TestData.commandResult = cmdRes;
- currentOpFilter = {
+ // If this is a non-localOps test running via mongoS, then the cursorID we obtained
+ // above is the ID of the mongoS cursor, and will not match the IDs of any of the
+ // individual shard cursors in the currentOp output. We therefore don't perform an
+ // exact match on 'command.getMore', but only verify that the cursor ID is non-zero.
+ const filter = {
"command.getMore":
(isRemoteShardCurOp ? {$gt: 0} : TestData.commandResult.cursor.id),
- "cursor.originatingCommand.$truncated": {$regex: truncatedQueryString},
+ [`cursor.originatingCommand.${cmdName}`]: {$exists: true},
"cursor.originatingCommand.comment": "currentop_query"
};
confirmCurrentOpContents({
test: function(db) {
- var cursor = new DBCommandCursor(db, TestData.commandResult, 5);
- assert.eq(cursor.itcount(), 0);
+ const cursor = new DBCommandCursor(db, TestData.commandResult, 5);
+ assert.eq(cursor.itcount(), 10);
},
+ command: "getMore",
planSummary: "COLLSCAN",
- currentOpFilter: currentOpFilter
+ currentOpFilter: filter
});
delete TestData.commandResult;
+ }
+
+ //
+ // Confirm that currentOp displays upconverted getMore and originatingCommand in the
+ // case of a legacy query.
+ //
+ if (readMode === "legacy") {
+ let filter = {
+ "command.getMore": {$gt: 0},
+ "command.collection": "currentop_query",
+ "command.batchSize": 2,
+ "cursor.originatingCommand.find": "currentop_query",
+ "cursor.originatingCommand.ntoreturn": 2,
+ "cursor.originatingCommand.comment": "currentop_query"
+ };
+
+ confirmCurrentOpContents({
+ test: function(db) {
+ load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+ // Temporarily disable hanging yields so that we can iterate the first
+ // batch.
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: db.getSiblingDB("admin"),
+ cmdObj: {configureFailPoint: "setYieldAllLocksHang", mode: "off"}
+ });
+
+ let cursor =
+ db.currentop_query.find({}).comment("currentop_query").batchSize(2);
- // Verify that an aggregation truncated by currentOp appears as { $truncated: <string>,
- // comment: <string> } when a comment parameter is present.
- truncatedQueryString =
- "^\\{ aggregate: \"currentop_query\", pipeline: \\[ \\{ \\$match: \\{ " +
- "1: \"1{149}\", 2: \"2{149}\", 3: \"3{149}\", 4: \"4{149}\", 5: \"5{149}\", " +
- "6: \"6{149}\", 7: \"7+\\.\\.\\.";
+ // Exhaust the current batch so that the next request will force a getMore.
+ while (cursor.objsLeftInBatch() > 0) {
+ cursor.next();
+ }
- currentOpFilter = commandOrOriginatingCommand(
- {"$truncated": {$regex: truncatedQueryString}, "comment": "currentop_query"},
- isRemoteShardCurOp);
+ // Set yields to hang so that we can check currentOp output.
+ FixtureHelpers.runCommandOnEachPrimary({
+ db: db.getSiblingDB("admin"),
+ cmdObj: {
+ configureFailPoint: "setYieldAllLocksHang",
+ mode: "alwaysOn",
+ data: {namespace: db.currentop_query.getFullName()}
+ }
+ });
+
+ assert.eq(cursor.itcount(), 8);
+ },
+ operation: "getmore",
+ planSummary: "COLLSCAN",
+ currentOpFilter: filter
+ });
+ }
+ //
+ // Confirm that a legacy query whose filter contains a field named 'query' appears as
+ // expected in currentOp. This test ensures that upconverting a legacy query correctly
+ // identifies this as a user field rather than a wrapped filter spec.
+ //
+ if (readMode === "legacy") {
confirmCurrentOpContents({
test: function(db) {
- assert.eq(db.currentop_query
- .aggregate([{$match: TestData.queryFilter}],
- {comment: "currentop_query"})
+ assert.eq(db.currentop_query.find({query: "foo", $comment: "currentop_query"})
.itcount(),
0);
},
+ command: "find",
planSummary: "COLLSCAN",
- currentOpFilter: currentOpFilter
+ currentOpFilter:
+ {"command.filter.$comment": "currentop_query", "command.filter.query": "foo"}
});
-
- delete TestData.queryFilter;
}
}
- function currentOpCommand(inputDB, filter, truncatedOps, localOps) {
- return inputDB.currentOp(Object.assign(filter, {$truncateOps: truncatedOps}));
- }
+ /**
+ * Runs a set of tests to verify that currentOp will serialize objects exceeding ~1000 bytes
+ * to string when the 'truncateOps' parameter is set.
+ */
+ function runTruncationTests() {
+ dropAndRecreateTestCollection();
+ assert.writeOK(coll.insert({a: 1}));
+
+ // When the currentOp command serializes the query object as a string, individual string
+ // values inside it are truncated at 150 characters. To test "total length" truncation
+ // we need to pass multiple values, each smaller than 150 bytes.
+ TestData.queryFilter = {
+ "1": "1".repeat(149),
+ "2": "2".repeat(149),
+ "3": "3".repeat(149),
+ "4": "4".repeat(149),
+ "5": "5".repeat(149),
+ "6": "6".repeat(149),
+ "7": "7".repeat(149),
+ };
+
+ var truncatedQueryString = "^\\{ find: \"currentop_query\", filter: \\{ " +
+ "1: \"1{149}\", 2: \"2{149}\", 3: \"3{149}\", 4: \"4{149}\", 5: \"5{149}\", " +
+ "6: \"6{149}\", 7: \"7+\\.\\.\\.";
- function currentOpAgg(inputDB, filter, truncatedOps, localOps) {
- return {
- inprog: inputDB.getSiblingDB("admin")
- .aggregate([
- {
- $currentOp: {
- localOps: (localOps || false),
- truncateOps: (truncatedOps || false)
- }
- },
- {$match: filter}
- ])
- .toArray(),
- ok: 1
+ let currentOpFilter;
+
+ currentOpFilter = {
+ "command.$truncated": {$regex: truncatedQueryString},
+ "command.comment": "currentop_query"
};
- }
- for (let connType of[rsConn, mongosConn]) {
- for (let readMode of["commands", "legacy"]) {
- for (let truncatedOps of[false, true]) {
- for (let localOps of[false, true]) {
- // Run all tests using the $currentOp aggregation stage.
- runTests({
- conn: connType,
- readMode: readMode,
- currentOp: currentOpAgg,
- localOps: localOps,
- truncatedOps: truncatedOps
- });
- }
- // Run tests using the currentOp command. The 'localOps' parameter is not supported.
+ confirmCurrentOpContents({
+ test: function(db) {
+ assert.eq(db.currentop_query.find(TestData.queryFilter)
+ .comment("currentop_query")
+ .itcount(),
+ 0);
+ },
+ planSummary: "COLLSCAN",
+ currentOpFilter: currentOpFilter
+ });
+
+ // Verify that an originatingCommand truncated by currentOp appears as { $truncated:
+ // <string>, comment: <string> }.
+ const cmdRes = testDB.runCommand({
+ find: "currentop_query",
+ filter: TestData.queryFilter,
+ comment: "currentop_query",
+ batchSize: 0
+ });
+ assert.commandWorked(cmdRes);
+
+ TestData.commandResult = cmdRes;
+
+ currentOpFilter = {
+ "command.getMore": (isRemoteShardCurOp ? {$gt: 0} : TestData.commandResult.cursor.id),
+ "cursor.originatingCommand.$truncated": {$regex: truncatedQueryString},
+ "cursor.originatingCommand.comment": "currentop_query"
+ };
+
+ confirmCurrentOpContents({
+ test: function(db) {
+ var cursor = new DBCommandCursor(db, TestData.commandResult, 5);
+ assert.eq(cursor.itcount(), 0);
+ },
+ planSummary: "COLLSCAN",
+ currentOpFilter: currentOpFilter
+ });
+
+ delete TestData.commandResult;
+
+ // Verify that an aggregation truncated by currentOp appears as { $truncated: <string>,
+ // comment: <string> } when a comment parameter is present.
+ truncatedQueryString =
+ "^\\{ aggregate: \"currentop_query\", pipeline: \\[ \\{ \\$match: \\{ " +
+ "1: \"1{149}\", 2: \"2{149}\", 3: \"3{149}\", 4: \"4{149}\", 5: \"5{149}\", " +
+ "6: \"6{149}\", 7: \"7+\\.\\.\\.";
+
+ currentOpFilter = commandOrOriginatingCommand(
+ {"$truncated": {$regex: truncatedQueryString}, "comment": "currentop_query"},
+ isRemoteShardCurOp);
+
+ confirmCurrentOpContents({
+ test: function(db) {
+ assert.eq(
+ db.currentop_query
+ .aggregate([{$match: TestData.queryFilter}], {comment: "currentop_query"})
+ .itcount(),
+ 0);
+ },
+ planSummary: "COLLSCAN",
+ currentOpFilter: currentOpFilter
+ });
+
+ delete TestData.queryFilter;
+ }
+}
+
+function currentOpCommand(inputDB, filter, truncatedOps, localOps) {
+ return inputDB.currentOp(Object.assign(filter, {$truncateOps: truncatedOps}));
+}
+
+function currentOpAgg(inputDB, filter, truncatedOps, localOps) {
+ return {
+ inprog:
+ inputDB.getSiblingDB("admin")
+ .aggregate([
+ {
+ $currentOp:
+ {localOps: (localOps || false), truncateOps: (truncatedOps || false)}
+ },
+ {$match: filter}
+ ])
+ .toArray(),
+ ok: 1
+ };
+}
+
+for (let connType of [rsConn, mongosConn]) {
+ for (let readMode of ["commands", "legacy"]) {
+ for (let truncatedOps of [false, true]) {
+ for (let localOps of [false, true]) {
+ // Run all tests using the $currentOp aggregation stage.
runTests({
conn: connType,
readMode: readMode,
- currentOp: currentOpCommand,
- localOps: false,
+ currentOp: currentOpAgg,
+ localOps: localOps,
truncatedOps: truncatedOps
});
}
+ // Run tests using the currentOp command. The 'localOps' parameter is not supported.
+ runTests({
+ conn: connType,
+ readMode: readMode,
+ currentOp: currentOpCommand,
+ localOps: false,
+ truncatedOps: truncatedOps
+ });
}
}
+}
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/currentop_transaction_metrics.js b/jstests/noPassthrough/currentop_transaction_metrics.js
index d676167c2c2..b65c39963f7 100644
--- a/jstests/noPassthrough/currentop_transaction_metrics.js
+++ b/jstests/noPassthrough/currentop_transaction_metrics.js
@@ -5,68 +5,68 @@
*/
(function() {
- 'use strict';
- load("jstests/core/txns/libs/prepare_helpers.js");
+'use strict';
+load("jstests/core/txns/libs/prepare_helpers.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const collName = 'currentop_transaction_metrics';
- const testDB = rst.getPrimary().getDB('test');
- const adminDB = rst.getPrimary().getDB('admin');
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
+const collName = 'currentop_transaction_metrics';
+const testDB = rst.getPrimary().getDB('test');
+const adminDB = rst.getPrimary().getDB('admin');
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB[collName].insert({x: 1}, {writeConcern: {w: "majority"}}));
- const session = adminDB.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase('test');
+const session = adminDB.getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase('test');
- session.startTransaction();
- // Run a few operations so that the transaction goes through several active/inactive periods.
- assert.commandWorked(sessionDB[collName].update({}, {a: 1}));
- assert.commandWorked(sessionDB[collName].insert({_id: "insert-1"}));
- assert.commandWorked(sessionDB[collName].insert({_id: "insert-2"}));
- assert.commandWorked(sessionDB[collName].insert({_id: "insert-3"}));
+session.startTransaction();
+// Run a few operations so that the transaction goes through several active/inactive periods.
+assert.commandWorked(sessionDB[collName].update({}, {a: 1}));
+assert.commandWorked(sessionDB[collName].insert({_id: "insert-1"}));
+assert.commandWorked(sessionDB[collName].insert({_id: "insert-2"}));
+assert.commandWorked(sessionDB[collName].insert({_id: "insert-3"}));
- const transactionFilter = {
- active: false,
- 'lsid': {$exists: true},
- 'transaction.parameters.txnNumber': {$eq: 0},
- 'transaction.parameters.autocommit': {$eq: false},
- 'transaction.timePreparedMicros': {$exists: false}
- };
+const transactionFilter = {
+ active: false,
+ 'lsid': {$exists: true},
+ 'transaction.parameters.txnNumber': {$eq: 0},
+ 'transaction.parameters.autocommit': {$eq: false},
+ 'transaction.timePreparedMicros': {$exists: false}
+};
- let currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).toArray();
- assert.eq(currentOp.length, 1);
+let currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).toArray();
+assert.eq(currentOp.length, 1);
- // Check that the currentOp's transaction subdocument's fields align with our expectations.
- let transactionDocument = currentOp[0].transaction;
- assert.gte(transactionDocument.timeOpenMicros,
- transactionDocument.timeActiveMicros + transactionDocument.timeInactiveMicros);
+// Check that the currentOp's transaction subdocument's fields align with our expectations.
+let transactionDocument = currentOp[0].transaction;
+assert.gte(transactionDocument.timeOpenMicros,
+ transactionDocument.timeActiveMicros + transactionDocument.timeInactiveMicros);
- // Check that preparing the transaction enables the 'timePreparedMicros' field in currentOp.
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+// Check that preparing the transaction enables the 'timePreparedMicros' field in currentOp.
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- const prepareTransactionFilter = {
- active: false,
- 'lsid': {$exists: true},
- 'transaction.parameters.txnNumber': {$eq: 0},
- 'transaction.parameters.autocommit': {$eq: false},
- 'transaction.timePreparedMicros': {$exists: true}
- };
+const prepareTransactionFilter = {
+ active: false,
+ 'lsid': {$exists: true},
+ 'transaction.parameters.txnNumber': {$eq: 0},
+ 'transaction.parameters.autocommit': {$eq: false},
+ 'transaction.timePreparedMicros': {$exists: true}
+};
- currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).toArray();
- assert.eq(currentOp.length, 1);
+currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: prepareTransactionFilter}]).toArray();
+assert.eq(currentOp.length, 1);
- // Check that the currentOp's transaction subdocument's fields align with our expectations.
- const prepareTransactionDocument = currentOp[0].transaction;
- assert.gte(prepareTransactionDocument.timeOpenMicros,
- prepareTransactionDocument.timeActiveMicros +
- prepareTransactionDocument.timeInactiveMicros);
- assert.gte(prepareTransactionDocument.timePreparedMicros, 0);
+// Check that the currentOp's transaction subdocument's fields align with our expectations.
+const prepareTransactionDocument = currentOp[0].transaction;
+assert.gte(
+ prepareTransactionDocument.timeOpenMicros,
+ prepareTransactionDocument.timeActiveMicros + prepareTransactionDocument.timeInactiveMicros);
+assert.gte(prepareTransactionDocument.timePreparedMicros, 0);
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- session.endSession();
+PrepareHelpers.commitTransaction(session, prepareTimestamp);
+session.endSession();
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/cycle_detection_test.js b/jstests/noPassthrough/cycle_detection_test.js
index f708decae79..c4fa17b59b0 100644
--- a/jstests/noPassthrough/cycle_detection_test.js
+++ b/jstests/noPassthrough/cycle_detection_test.js
@@ -2,89 +2,89 @@
* Tests for the Graph#findCycle() method.
*/
(function() {
- 'use strict';
-
- load('jstests/libs/cycle_detection.js'); // for Graph
-
- (function testLinearChainHasNoCycle() {
- const graph = new Graph();
- graph.addEdge('A', 'B');
- graph.addEdge('B', 'C');
- graph.addEdge('C', 'D');
-
- assert.eq([], graph.findCycle());
- })();
-
- (function testGraphWithoutCycleButCommonAncestor() {
- const graph = new Graph();
- graph.addEdge('A', 'B');
- graph.addEdge('A', 'C');
- graph.addEdge('B', 'D');
- graph.addEdge('C', 'D');
-
- assert.eq([], graph.findCycle());
- })();
-
- (function testEmptyGraphHasNoCycle() {
- const graph = new Graph();
- assert.eq([], graph.findCycle());
- })();
-
- (function testGraphWithAllNodesInCycle() {
- const graph = new Graph();
- graph.addEdge(1, 2);
- graph.addEdge(2, 3);
- graph.addEdge(3, 4);
- graph.addEdge(4, 5);
- graph.addEdge(5, 1);
-
- assert.eq([1, 2, 3, 4, 5, 1], graph.findCycle());
- })();
-
- (function testGraphWithSomeNodesNotInCycle() {
- const graph = new Graph();
- graph.addEdge(1, 2);
- graph.addEdge(2, 3);
- graph.addEdge(3, 4);
- graph.addEdge(4, 5);
- graph.addEdge(5, 3);
-
- assert.eq([3, 4, 5, 3], graph.findCycle());
- })();
-
- (function testGraphWithSelfLoopConsideredCycle() {
- const graph = new Graph();
- graph.addEdge(0, 0);
- assert.eq([0, 0], graph.findCycle());
- })();
-
- (function testGraphUsesNonReferentialEquality() {
- const w = {a: new NumberInt(1)};
- const x = {a: new NumberInt(1)};
- const y = {a: new NumberLong(1)};
- const z = {a: 1};
-
- let graph = new Graph();
- graph.addEdge(w, x);
- assert.eq([w, x], graph.findCycle());
-
- graph = new Graph();
- graph.addEdge(w, y);
- assert.eq([], graph.findCycle());
-
- graph = new Graph();
- graph.addEdge(w, z);
- assert.eq([w, z], graph.findCycle());
- })();
-
- (function testGraphMinimizesCycleUsingNonReferentialEquality() {
- const graph = new Graph();
- graph.addEdge({a: 1}, {a: 2});
- graph.addEdge({a: 2}, {a: 3});
- graph.addEdge({a: 3}, {a: 4});
- graph.addEdge({a: 4}, {a: 5});
- graph.addEdge({a: 5}, {a: 3});
-
- assert.eq([{a: 3}, {a: 4}, {a: 5}, {a: 3}], graph.findCycle());
- })();
+'use strict';
+
+load('jstests/libs/cycle_detection.js'); // for Graph
+
+(function testLinearChainHasNoCycle() {
+ const graph = new Graph();
+ graph.addEdge('A', 'B');
+ graph.addEdge('B', 'C');
+ graph.addEdge('C', 'D');
+
+ assert.eq([], graph.findCycle());
+})();
+
+(function testGraphWithoutCycleButCommonAncestor() {
+ const graph = new Graph();
+ graph.addEdge('A', 'B');
+ graph.addEdge('A', 'C');
+ graph.addEdge('B', 'D');
+ graph.addEdge('C', 'D');
+
+ assert.eq([], graph.findCycle());
+})();
+
+(function testEmptyGraphHasNoCycle() {
+ const graph = new Graph();
+ assert.eq([], graph.findCycle());
+})();
+
+(function testGraphWithAllNodesInCycle() {
+ const graph = new Graph();
+ graph.addEdge(1, 2);
+ graph.addEdge(2, 3);
+ graph.addEdge(3, 4);
+ graph.addEdge(4, 5);
+ graph.addEdge(5, 1);
+
+ assert.eq([1, 2, 3, 4, 5, 1], graph.findCycle());
+})();
+
+(function testGraphWithSomeNodesNotInCycle() {
+ const graph = new Graph();
+ graph.addEdge(1, 2);
+ graph.addEdge(2, 3);
+ graph.addEdge(3, 4);
+ graph.addEdge(4, 5);
+ graph.addEdge(5, 3);
+
+ assert.eq([3, 4, 5, 3], graph.findCycle());
+})();
+
+(function testGraphWithSelfLoopConsideredCycle() {
+ const graph = new Graph();
+ graph.addEdge(0, 0);
+ assert.eq([0, 0], graph.findCycle());
+})();
+
+(function testGraphUsesNonReferentialEquality() {
+ const w = {a: new NumberInt(1)};
+ const x = {a: new NumberInt(1)};
+ const y = {a: new NumberLong(1)};
+ const z = {a: 1};
+
+ let graph = new Graph();
+ graph.addEdge(w, x);
+ assert.eq([w, x], graph.findCycle());
+
+ graph = new Graph();
+ graph.addEdge(w, y);
+ assert.eq([], graph.findCycle());
+
+ graph = new Graph();
+ graph.addEdge(w, z);
+ assert.eq([w, z], graph.findCycle());
+})();
+
+(function testGraphMinimizesCycleUsingNonReferentialEquality() {
+ const graph = new Graph();
+ graph.addEdge({a: 1}, {a: 2});
+ graph.addEdge({a: 2}, {a: 3});
+ graph.addEdge({a: 3}, {a: 4});
+ graph.addEdge({a: 4}, {a: 5});
+ graph.addEdge({a: 5}, {a: 3});
+
+ assert.eq([{a: 3}, {a: 4}, {a: 5}, {a: 3}], graph.findCycle());
+})();
})();
diff --git a/jstests/noPassthrough/data_consistency_checks.js b/jstests/noPassthrough/data_consistency_checks.js
index dcddefaf882..94c44f3e49b 100644
--- a/jstests/noPassthrough/data_consistency_checks.js
+++ b/jstests/noPassthrough/data_consistency_checks.js
@@ -9,193 +9,190 @@
var db;
(function() {
- "use strict";
-
-    // We skip the data consistency checks while terminating the cluster because they would skew
-    // the counts of how many times the "dbhash" and "validate" commands are run.
- TestData.skipCollectionAndIndexValidation = true;
- TestData.skipCheckDBHashes = true;
-
- function makePatternForDBHash(dbName) {
- return new RegExp("COMMAND.*command " + dbName +
- "\\.\\$cmd appName: \"MongoDB Shell\" command: db[Hh]ash",
- "g");
+"use strict";
+
+// We skip the data consistency checks while terminating the cluster because they would skew the
+// counts of how many times the "dbhash" and "validate" commands are run.
+TestData.skipCollectionAndIndexValidation = true;
+TestData.skipCheckDBHashes = true;
+
+function makePatternForDBHash(dbName) {
+ return new RegExp(
+ "COMMAND.*command " + dbName + "\\.\\$cmd appName: \"MongoDB Shell\" command: db[Hh]ash",
+ "g");
+}
+
+function makePatternForValidate(dbName, collName) {
+ return new RegExp("COMMAND.*command " + dbName +
+ "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" +
+ collName + "\"",
+ "g");
+}
+
+function countMatches(pattern, output) {
+ assert(pattern.global, "the 'g' flag must be used to find all matches");
+
+ let numMatches = 0;
+ while (pattern.exec(output) !== null) {
+ ++numMatches;
}
-
- function makePatternForValidate(dbName, collName) {
- return new RegExp(
- "COMMAND.*command " + dbName +
- "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" + collName +
- "\"",
- "g");
+ return numMatches;
+}
+
+function runDataConsistencyChecks(testCase) {
+ db = testCase.conn.getDB("test");
+ try {
+ clearRawMongoProgramOutput();
+
+ load("jstests/hooks/run_check_repl_dbhash.js");
+ load("jstests/hooks/run_validate_collections.js");
+
+ // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
+ // will return all of their output.
+ testCase.teardown();
+ return rawMongoProgramOutput();
+ } finally {
+ db = undefined;
}
+}
+
+(function testReplicaSetWithVotingSecondaries() {
+ const numNodes = 2;
+ const rst = new ReplSetTest({
+ nodes: numNodes,
+ nodeOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ }
+ });
+ rst.startSet();
+ rst.initiateWithNodeZeroAsPrimary();
+
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
+ assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
+ const output = runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
+
+ let pattern = makePatternForDBHash("test");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+})();
- function countMatches(pattern, output) {
- assert(pattern.global, "the 'g' flag must be used to find all matches");
-
- let numMatches = 0;
- while (pattern.exec(output) !== null) {
- ++numMatches;
+(function testReplicaSetWithNonVotingSecondaries() {
+ const numNodes = 2;
+ const rst = new ReplSetTest({
+ nodes: numNodes,
+ nodeOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
}
- return numMatches;
+ });
+ rst.startSet();
+
+ const replSetConfig = rst.getReplSetConfig();
+ for (let i = 1; i < numNodes; ++i) {
+ replSetConfig.members[i].priority = 0;
+ replSetConfig.members[i].votes = 0;
}
+ rst.initiate(replSetConfig);
- function runDataConsistencyChecks(testCase) {
- db = testCase.conn.getDB("test");
- try {
- clearRawMongoProgramOutput();
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
+ assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
+ const output = runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
- load("jstests/hooks/run_check_repl_dbhash.js");
- load("jstests/hooks/run_validate_collections.js");
+ let pattern = makePatternForDBHash("test");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
- // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
- // will return all of their output.
- testCase.teardown();
- return rawMongoProgramOutput();
- } finally {
- db = undefined;
- }
- }
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(numNodes,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each node in the log output");
+})();
- (function testReplicaSetWithVotingSecondaries() {
- const numNodes = 2;
- const rst = new ReplSetTest({
- nodes: numNodes,
- nodeOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- }
- });
- rst.startSet();
- rst.initiateWithNodeZeroAsPrimary();
-
- // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
- assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
- const output =
- runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
-
- let pattern = makePatternForDBHash("test");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
-
- pattern = makePatternForValidate("test", "mycoll");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
- })();
-
- (function testReplicaSetWithNonVotingSecondaries() {
- const numNodes = 2;
- const rst = new ReplSetTest({
- nodes: numNodes,
- nodeOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- }
- });
- rst.startSet();
-
- const replSetConfig = rst.getReplSetConfig();
- for (let i = 1; i < numNodes; ++i) {
- replSetConfig.members[i].priority = 0;
- replSetConfig.members[i].votes = 0;
+(function testShardedClusterWithOneNodeCSRS() {
+ const st = new ShardingTest({
+ mongos: 1,
+ config: 1,
+ configOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ },
+ shards: 1
+ });
+
+    // We shard a collection to guarantee that at least one collection exists in the "config"
+    // database when we run the data consistency checks against the CSRS.
+ st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
+
+ const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
+
+ let pattern = makePatternForDBHash("config");
+ assert.eq(0,
+ countMatches(pattern, output),
+ "expected not to find " + tojson(pattern) + " in the log output for 1-node CSRS");
+
+ // The choice of using the "config.collections" collection here is mostly arbitrary as the
+ // "config.databases" and "config.chunks" collections are also implicitly created as part of
+ // sharding a collection.
+ pattern = makePatternForValidate("config", "collections");
+ assert.eq(1,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " in the log output for 1-node CSRS");
+})();
+
+(function testShardedCluster() {
+ const st = new ShardingTest({
+ mongos: 1,
+ config: 3,
+ configOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
+ },
+ shards: 1,
+ rs: {nodes: 2},
+ rsOptions: {
+ setParameter: {logComponentVerbosity: tojson({command: 1})},
}
- rst.initiate(replSetConfig);
-
- // Insert a document so the "dbhash" and "validate" commands have some actual work to do.
- assert.commandWorked(rst.nodes[0].getDB("test").mycoll.insert({}));
- const output =
- runDataConsistencyChecks({conn: rst.nodes[0], teardown: () => rst.stopSet()});
-
- let pattern = makePatternForDBHash("test");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
-
- pattern = makePatternForValidate("test", "mycoll");
- assert.eq(numNodes,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each node in the log output");
- })();
-
- (function testShardedClusterWithOneNodeCSRS() {
- const st = new ShardingTest({
- mongos: 1,
- config: 1,
- configOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- },
- shards: 1
- });
-
-        // We shard a collection to guarantee that at least one collection exists in the "config"
-        // database when we run the data consistency checks against the CSRS.
- st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
-
- const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
-
- let pattern = makePatternForDBHash("config");
- assert.eq(0,
- countMatches(pattern, output),
- "expected not to find " + tojson(pattern) + " in the log output for 1-node CSRS");
-
- // The choice of using the "config.collections" collection here is mostly arbitrary as the
- // "config.databases" and "config.chunks" collections are also implicitly created as part of
- // sharding a collection.
- pattern = makePatternForValidate("config", "collections");
- assert.eq(1,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " in the log output for 1-node CSRS");
- })();
-
- (function testShardedCluster() {
- const st = new ShardingTest({
- mongos: 1,
- config: 3,
- configOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- },
- shards: 1,
- rs: {nodes: 2},
- rsOptions: {
- setParameter: {logComponentVerbosity: tojson({command: 1})},
- }
- });
-
-        // We shard a collection to guarantee that at least one collection exists in the "config"
-        // database when we run the data consistency checks against the CSRS.
- st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
-
- // Insert a document so the "dbhash" and "validate" commands have some actual work to do on
- // the replica set shard.
- assert.commandWorked(st.s.getDB("test").mycoll.insert({_id: 0}));
- const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
-
- // The "config" database exists on both the CSRS and the replica set shards due to the
- // "config.transactions" collection.
- let pattern = makePatternForDBHash("config");
- assert.eq(5,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) +
- " from each CSRS node and each replica set shard node in the log output");
-
- // The choice of using the "config.collections" collection here is mostly arbitrary as the
- // "config.databases" and "config.chunks" collections are also implicitly created as part of
- // sharding a collection.
- pattern = makePatternForValidate("config", "collections");
- assert.eq(3,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from each CSRS node in the log output");
-
- pattern = makePatternForDBHash("test");
- assert.eq(2,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) +
- " from each replica set shard node in the log output");
-
- pattern = makePatternForValidate("test", "mycoll");
- assert.eq(2,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) +
- " from each replica set shard node in the log output");
- })();
+ });
+
+    // We shard a collection to guarantee that at least one collection exists in the "config"
+    // database when we run the data consistency checks against the CSRS.
+ st.shardColl(st.s.getDB("test").mycoll, {_id: 1}, false);
+
+ // Insert a document so the "dbhash" and "validate" commands have some actual work to do on
+ // the replica set shard.
+ assert.commandWorked(st.s.getDB("test").mycoll.insert({_id: 0}));
+ const output = runDataConsistencyChecks({conn: st.s, teardown: () => st.stop()});
+
+ // The "config" database exists on both the CSRS and the replica set shards due to the
+ // "config.transactions" collection.
+ let pattern = makePatternForDBHash("config");
+ assert.eq(5,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each CSRS node and each replica set shard node in the log output");
+
+ // The choice of using the "config.collections" collection here is mostly arbitrary as the
+ // "config.databases" and "config.chunks" collections are also implicitly created as part of
+ // sharding a collection.
+ pattern = makePatternForValidate("config", "collections");
+ assert.eq(3,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from each CSRS node in the log output");
+
+ pattern = makePatternForDBHash("test");
+ assert.eq(2,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each replica set shard node in the log output");
+
+ pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(2,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) +
+ " from each replica set shard node in the log output");
+})();
})();
diff --git a/jstests/noPassthrough/dbhash_capped_collection.js b/jstests/noPassthrough/dbhash_capped_collection.js
index 195f003bea6..adf288bf1e9 100644
--- a/jstests/noPassthrough/dbhash_capped_collection.js
+++ b/jstests/noPassthrough/dbhash_capped_collection.js
@@ -4,52 +4,52 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const db = rst.getPrimary().getDB("test");
-
- // We create a capped collection as well as a non-capped collection and verify that the "capped"
- // field in the dbHash command response only lists the capped one.
- assert.commandWorked(db.runCommand({create: "noncapped"}));
- assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 4096}));
- let res = assert.commandWorked(db.runCommand({dbHash: 1}));
- assert.eq(["capped"], res.capped);
-
- // If the capped collection is excluded from the list of collections to md5sum, then it won't
- // appear in the "capped" field either.
- res = assert.commandWorked(db.runCommand({dbHash: 1, collections: ["noncapped"]}));
- assert.eq([], res.capped);
-
- {
- const session = db.getMongo().startSession();
-
- const hashesDefault = rst.getHashesUsingSessions([session], db.getName());
- const hashesFilterCapped =
- rst.getHashesUsingSessions([session], db.getName(), {filterCapped: true});
- const hashesNoFilterCapped =
- rst.getHashesUsingSessions([session], db.getName(), {filterCapped: false});
-
- assert.eq(["noncapped"],
- Object.keys(hashesFilterCapped[0].collections),
- "capped collection should have been filtered out");
- assert.eq(["capped", "noncapped"],
- Object.keys(hashesNoFilterCapped[0].collections).sort(),
- "capped collection should not have been filtered out");
- assert.eq(hashesDefault[0].collections,
- hashesFilterCapped[0].collections,
- "getHashesUsingSessions() should default to filter out capped collections");
-
- const hashes = rst.getHashes(db.getName());
- assert.eq(hashesNoFilterCapped[0].collections,
- hashes.master.collections,
- "getHashes() should default to not filter out capped collections");
-
- session.endSession();
- }
-
- rst.stopSet();
+"use strict";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const db = rst.getPrimary().getDB("test");
+
+// We create a capped collection as well as a non-capped collection and verify that the "capped"
+// field in the dbHash command response only lists the capped one.
+assert.commandWorked(db.runCommand({create: "noncapped"}));
+assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 4096}));
+let res = assert.commandWorked(db.runCommand({dbHash: 1}));
+assert.eq(["capped"], res.capped);
+
+// If the capped collection is excluded from the list of collections to md5sum, then it won't
+// appear in the "capped" field either.
+res = assert.commandWorked(db.runCommand({dbHash: 1, collections: ["noncapped"]}));
+assert.eq([], res.capped);
+
+{
+ const session = db.getMongo().startSession();
+
+ const hashesDefault = rst.getHashesUsingSessions([session], db.getName());
+ const hashesFilterCapped =
+ rst.getHashesUsingSessions([session], db.getName(), {filterCapped: true});
+ const hashesNoFilterCapped =
+ rst.getHashesUsingSessions([session], db.getName(), {filterCapped: false});
+
+ assert.eq(["noncapped"],
+ Object.keys(hashesFilterCapped[0].collections),
+ "capped collection should have been filtered out");
+ assert.eq(["capped", "noncapped"],
+ Object.keys(hashesNoFilterCapped[0].collections).sort(),
+ "capped collection should not have been filtered out");
+ assert.eq(hashesDefault[0].collections,
+ hashesFilterCapped[0].collections,
+ "getHashesUsingSessions() should default to filter out capped collections");
+
+ const hashes = rst.getHashes(db.getName());
+ assert.eq(hashesNoFilterCapped[0].collections,
+ hashes.master.collections,
+ "getHashes() should default to not filter out capped collections");
+
+ session.endSession();
+}
+
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/devnull.js b/jstests/noPassthrough/devnull.js
index c2e09279b5e..2244875efc9 100644
--- a/jstests/noPassthrough/devnull.js
+++ b/jstests/noPassthrough/devnull.js
@@ -1,13 +1,13 @@
(function() {
- var mongo = MongoRunner.runMongod({storageEngine: "devnull"});
+var mongo = MongoRunner.runMongod({storageEngine: "devnull"});
- db = mongo.getDB("test");
+db = mongo.getDB("test");
- res = db.foo.insert({x: 1});
- assert.eq(1, res.nInserted, tojson(res));
+res = db.foo.insert({x: 1});
+assert.eq(1, res.nInserted, tojson(res));
-    // Skip collection validation during stopMongod if the storage engine is invalid.
- TestData.skipCollectionAndIndexValidation = true;
+// Skip collection validation during stopMongod if the storage engine is invalid.
+TestData.skipCollectionAndIndexValidation = true;
- MongoRunner.stopMongod(mongo);
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/directoryperdb.js b/jstests/noPassthrough/directoryperdb.js
index ce123ae08fb..56fe3c1f645 100644
--- a/jstests/noPassthrough/directoryperdb.js
+++ b/jstests/noPassthrough/directoryperdb.js
@@ -8,39 +8,39 @@
*/
(function() {
- 'use strict';
+'use strict';
- var baseDir = "jstests_directoryperdb";
- var dbpath = MongoRunner.dataPath + baseDir + "/";
+var baseDir = "jstests_directoryperdb";
+var dbpath = MongoRunner.dataPath + baseDir + "/";
- var isDirectoryPerDBSupported =
- jsTest.options().storageEngine == "wiredTiger" || !jsTest.options().storageEngine;
+var isDirectoryPerDBSupported =
+ jsTest.options().storageEngine == "wiredTiger" || !jsTest.options().storageEngine;
- var m = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: ''});
+var m = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: ''});
- if (!isDirectoryPerDBSupported) {
- assert.isnull(m, 'storage engine without directoryperdb support should fail to start up');
- return;
- } else {
- assert(m, 'storage engine with directoryperdb support failed to start up');
- }
+if (!isDirectoryPerDBSupported) {
+ assert.isnull(m, 'storage engine without directoryperdb support should fail to start up');
+ return;
+} else {
+ assert(m, 'storage engine with directoryperdb support failed to start up');
+}
- var db = m.getDB("foo");
- db.bar.insert({x: 1});
- assert.eq(1, db.bar.count());
+var db = m.getDB("foo");
+db.bar.insert({x: 1});
+assert.eq(1, db.bar.count());
- db.adminCommand({fsync: 1});
- var dbpathFiles = listFiles(dbpath);
- var files = dbpathFiles.filter(function(z) {
- return z.name.endsWith("/foo");
- });
- assert.eq(1, files.length, 'dbpath does not contain "foo" directory: ' + tojson(dbpathFiles));
+db.adminCommand({fsync: 1});
+var dbpathFiles = listFiles(dbpath);
+var files = dbpathFiles.filter(function(z) {
+ return z.name.endsWith("/foo");
+});
+assert.eq(1, files.length, 'dbpath does not contain "foo" directory: ' + tojson(dbpathFiles));
- files = listFiles(files[0].name);
- assert(files.length > 0);
+files = listFiles(files[0].name);
+assert(files.length > 0);
- MongoRunner.stopMongod(m);
+MongoRunner.stopMongod(m);
- // Subsequent attempt to start server using same dbpath without directoryperdb should fail.
- assert.isnull(MongoRunner.runMongod({dbpath: dbpath, restart: true}));
+// Subsequent attempt to start server using same dbpath without directoryperdb should fail.
+assert.isnull(MongoRunner.runMongod({dbpath: dbpath, restart: true}));
}());
diff --git a/jstests/noPassthrough/disable_majority_reads_restart.js b/jstests/noPassthrough/disable_majority_reads_restart.js
index 596eabad052..0d21f0d07f3 100644
--- a/jstests/noPassthrough/disable_majority_reads_restart.js
+++ b/jstests/noPassthrough/disable_majority_reads_restart.js
@@ -5,78 +5,78 @@
* requires_wiredtiger]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- // Insert a document and ensure it is in the stable checkpoint by restarting.
- let coll = rst.getPrimary().getDB(dbName)[collName];
- assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- rst.stopSet(undefined, true);
- rst.startSet(undefined, true);
+// Insert a document and ensure it is in the stable checkpoint by restarting.
+let coll = rst.getPrimary().getDB(dbName)[collName];
+assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+rst.stopSet(undefined, true);
+rst.startSet(undefined, true);
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+assert.commandWorked(
+ rst.getPrimary().adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
- // Insert a document that will not be in a stable checkpoint.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.commandWorked(coll.insert({_id: 1}));
+// Insert a document that will not be in a stable checkpoint.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.commandWorked(coll.insert({_id: 1}));
- // Restart the node with enableMajorityReadConcern:false.
- rst.stopSet(undefined, true);
- rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
+// Restart the node with enableMajorityReadConcern:false.
+rst.stopSet(undefined, true);
+rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
- // Both inserts should be reflected in the data and the oplog.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.eq([{_id: 0}, {_id: 1}], coll.find().sort({_id: 1}).toArray());
- let oplog = rst.getPrimary().getDB("local").oplog.rs;
- assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
+// Both inserts should be reflected in the data and the oplog.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.eq([{_id: 0}, {_id: 1}], coll.find().sort({_id: 1}).toArray());
+let oplog = rst.getPrimary().getDB("local").oplog.rs;
+assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
- // Restart the node with enableMajorityReadConcern:false without adding any documents.
- rst.stopSet(undefined, true);
- rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
+// Restart the node with enableMajorityReadConcern:false without adding any documents.
+rst.stopSet(undefined, true);
+rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
- // Both inserts should still be reflected in the data and the oplog.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.eq([{_id: 0}, {_id: 1}], coll.find().sort({_id: 1}).toArray());
- oplog = rst.getPrimary().getDB("local").oplog.rs;
- assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
+// Both inserts should still be reflected in the data and the oplog.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.eq([{_id: 0}, {_id: 1}], coll.find().sort({_id: 1}).toArray());
+oplog = rst.getPrimary().getDB("local").oplog.rs;
+assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
- // Insert another document.
- assert.commandWorked(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+// Insert another document.
+assert.commandWorked(coll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- // Restart the node with enableMajorityReadConcern:false.
- rst.stopSet(undefined, true);
- rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
+// Restart the node with enableMajorityReadConcern:false.
+rst.stopSet(undefined, true);
+rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
- // All three inserts should be reflected in the data and the oplog.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], coll.find().sort({_id: 1}).toArray());
- oplog = rst.getPrimary().getDB("local").oplog.rs;
- assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 2}}).itcount());
+// All three inserts should be reflected in the data and the oplog.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], coll.find().sort({_id: 1}).toArray());
+oplog = rst.getPrimary().getDB("local").oplog.rs;
+assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 2}}).itcount());
- // Restart the node with enableMajorityReadConcern:true.
- rst.stopSet(undefined, true);
- rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
+// Restart the node with enableMajorityReadConcern:true.
+rst.stopSet(undefined, true);
+rst.startSet({noCleanData: true, enableMajorityReadConcern: "false"});
- // All three inserts should still be reflected in the data and the oplog.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], coll.find().sort({_id: 1}).toArray());
- oplog = rst.getPrimary().getDB("local").oplog.rs;
- assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
- assert.eq(1, oplog.find({o: {_id: 2}}).itcount());
+// All three inserts should still be reflected in the data and the oplog.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.eq([{_id: 0}, {_id: 1}, {_id: 2}], coll.find().sort({_id: 1}).toArray());
+oplog = rst.getPrimary().getDB("local").oplog.rs;
+assert.eq(1, oplog.find({o: {_id: 0}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 1}}).itcount());
+assert.eq(1, oplog.find({o: {_id: 2}}).itcount());
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/disabled_test_parameters.js b/jstests/noPassthrough/disabled_test_parameters.js
index 948883278ad..38ebed92310 100644
--- a/jstests/noPassthrough/disabled_test_parameters.js
+++ b/jstests/noPassthrough/disabled_test_parameters.js
@@ -1,44 +1,44 @@
// Test that test-only set parameters are disabled.
(function() {
- 'use strict';
+'use strict';
- function assertFails(opts) {
- assert.eq(null, MongoRunner.runMongod(opts), "Mongod startup up");
- }
+function assertFails(opts) {
+    assert.eq(null, MongoRunner.runMongod(opts), "Mongod unexpectedly started up");
+}
- function assertStarts(opts) {
- const mongod = MongoRunner.runMongod(opts);
- assert(mongod, "Mongod startup up");
- MongoRunner.stopMongod(mongod);
- }
+function assertStarts(opts) {
+ const mongod = MongoRunner.runMongod(opts);
+    assert(mongod, "Mongod failed to start up");
+ MongoRunner.stopMongod(mongod);
+}
- setJsTestOption('enableTestCommands', false);
+setJsTestOption('enableTestCommands', false);
- // enableTestCommands not specified.
- assertFails({
+// enableTestCommands not specified.
+assertFails({
+ 'setParameter': {
+ enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false',
+ },
+});
+
+// enableTestCommands specified as truthy.
+['1', 'true'].forEach(v => {
+ assertStarts({
'setParameter': {
+ enableTestCommands: v,
enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false',
},
});
+});
- // enableTestCommands specified as truthy.
- ['1', 'true'].forEach(v => {
- assertStarts({
- 'setParameter': {
- enableTestCommands: v,
- enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false',
- },
- });
- });
-
- // enableTestCommands specified as falsy.
- ['0', 'false'].forEach(v => {
- assertFails({
- 'setParameter': {
- enableTestCommands: v,
- enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false',
- },
- });
+// enableTestCommands specified as falsy.
+['0', 'false'].forEach(v => {
+ assertFails({
+ 'setParameter': {
+ enableTestCommands: v,
+ enableIndexBuildsCoordinatorForCreateIndexesCommand: 'false',
+ },
});
+});
}());
diff --git a/jstests/noPassthrough/do_not_drop_coll_after_succesful_out.js b/jstests/noPassthrough/do_not_drop_coll_after_succesful_out.js
index 0e6d4cdbfb2..98f9bd41dfd 100644
--- a/jstests/noPassthrough/do_not_drop_coll_after_succesful_out.js
+++ b/jstests/noPassthrough/do_not_drop_coll_after_succesful_out.js
@@ -1,28 +1,28 @@
// Confirms that there's no attempt to drop a temp collection after $out is performed.
(function() {
- "use strict";
+"use strict";
- // Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
- // of data logged for each op.
- TestData.skipGossipingClusterTime = true;
+// Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
+// of data logged for each op.
+TestData.skipGossipingClusterTime = true;
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("test");
- const coll = testDB.do_not_drop_coll_after_succesful_out;
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+const testDB = conn.getDB("test");
+const coll = testDB.do_not_drop_coll_after_succesful_out;
- assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
- assert.commandWorked(testDB.setLogLevel(2, "command"));
- assert.commandWorked(testDB.adminCommand({clearLog: "global"}));
+assert.commandWorked(testDB.setLogLevel(2, "command"));
+assert.commandWorked(testDB.adminCommand({clearLog: "global"}));
- coll.aggregate([{$out: coll.getName() + "_out"}]);
- const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log;
+coll.aggregate([{$out: coll.getName() + "_out"}]);
+const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log;
- for (let i = 0; i < log.length; ++i) {
- const line = log[i];
- assert.eq(line.indexOf("drop test.tmp.agg_out"), -1, line);
- }
+for (let i = 0; i < log.length; ++i) {
+ const line = log[i];
+ assert.eq(line.indexOf("drop test.tmp.agg_out"), -1, line);
+}
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/do_not_rebuild_indexes_before_repair.js b/jstests/noPassthrough/do_not_rebuild_indexes_before_repair.js
index 12fd52a09f2..cfb1d102019 100644
--- a/jstests/noPassthrough/do_not_rebuild_indexes_before_repair.js
+++ b/jstests/noPassthrough/do_not_rebuild_indexes_before_repair.js
@@ -6,61 +6,60 @@
* @tags: [requires_persistence, requires_replication, requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "indexRebuild";
- const collName = "coll";
+const dbName = "indexRebuild";
+const collName = "coll";
- const rst = new ReplSetTest({
- name: "doNotRebuildIndexesBeforeRepair",
- nodes: 2,
- nodeOptions:
- {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
- });
- const nodes = rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({
+ name: "doNotRebuildIndexesBeforeRepair",
+ nodes: 2,
+ nodeOptions: {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
+});
+const nodes = rst.startSet();
+rst.initiate();
- if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
- // Only snapshotting storage engines can pause advancing the stable timestamp allowing us
- // to get into a state where indexes exist, but the underlying tables were dropped.
- rst.stopSet();
- return;
- }
+if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
+ // Only snapshotting storage engines can pause advancing the stable timestamp allowing us
+ // to get into a state where indexes exist, but the underlying tables were dropped.
+ rst.stopSet();
+ return;
+}
- let coll = rst.getPrimary().getDB(dbName)[collName];
- assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}], {}, {writeConcern: {w: "majority"}}));
- assert.eq(3, coll.getIndexes().length);
- rst.awaitReplication(undefined, ReplSetTest.OpTimeType.LAST_DURABLE);
+let coll = rst.getPrimary().getDB(dbName)[collName];
+assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}], {}, {writeConcern: {w: "majority"}}));
+assert.eq(3, coll.getIndexes().length);
+rst.awaitReplication(undefined, ReplSetTest.OpTimeType.LAST_DURABLE);
- // Lock the index entries into a stable checkpoint by shutting down.
- rst.stopSet(undefined, true);
- rst.startSet(undefined, true);
+// Lock the index entries into a stable checkpoint by shutting down.
+rst.stopSet(undefined, true);
+rst.startSet(undefined, true);
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- nodes.forEach(node => assert.commandWorked(node.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+nodes.forEach(node => assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
- // Dropping the index would normally modify the collection metadata and drop the
- // table. Because we're not advancing the stable timestamp and we're going to crash the
- // server, the catalog change won't take effect, but ident being dropped will.
- coll = rst.getPrimary().getDB(dbName)[collName];
- assert.commandWorked(coll.dropIndexes());
- rst.awaitReplication();
+// Dropping the index would normally modify the collection metadata and drop the
+// table. Because we're not advancing the stable timestamp and we're going to crash the
+// server, the catalog change won't take effect, but the drop of the underlying ident will.
+coll = rst.getPrimary().getDB(dbName)[collName];
+assert.commandWorked(coll.dropIndexes());
+rst.awaitReplication();
- let primaryDbpath = rst.getPrimary().dbpath;
- let primaryPort = rst.getPrimary().port;
- rst.stopSet(9, true, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+let primaryDbpath = rst.getPrimary().dbpath;
+let primaryPort = rst.getPrimary().port;
+rst.stopSet(9, true, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- // This should succeed in rebuilding the indexes, but only after the databases have been
- // repaired.
- assert.eq(
- 0, runMongoProgram("mongod", "--repair", "--port", primaryPort, "--dbpath", primaryDbpath));
+// This should succeed in rebuilding the indexes, but only after the databases have been
+// repaired.
+assert.eq(0,
+ runMongoProgram("mongod", "--repair", "--port", primaryPort, "--dbpath", primaryDbpath));
- // Restarting the replica set would roll back the index drop. Instead we want to start a
- // standalone and verify that repair rebuilt the indexes.
- let mongod = MongoRunner.runMongod({dbpath: primaryDbpath, noCleanData: true});
- assert.eq(3, mongod.getDB(dbName)[collName].getIndexes().length);
+// Restarting the replica set would roll back the index drop. Instead we want to start a
+// standalone and verify that repair rebuilt the indexes.
+let mongod = MongoRunner.runMongod({dbpath: primaryDbpath, noCleanData: true});
+assert.eq(3, mongod.getDB(dbName)[collName].getIndexes().length);
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
})();
diff --git a/jstests/noPassthrough/document_count_functions.js b/jstests/noPassthrough/document_count_functions.js
index c8accb8c5c3..c1dd15ee591 100644
--- a/jstests/noPassthrough/document_count_functions.js
+++ b/jstests/noPassthrough/document_count_functions.js
@@ -3,58 +3,56 @@
* Tests the countDocuments and estimatedDocumentCount commands.
*/
(function() {
- "use strict";
+"use strict";
- const standalone = MongoRunner.runMongod();
- const dbName = "test";
- const db = standalone.getDB(dbName);
- const collName = "document_count_functions";
- const coll = db.getCollection(collName);
+const standalone = MongoRunner.runMongod();
+const dbName = "test";
+const db = standalone.getDB(dbName);
+const collName = "document_count_functions";
+const coll = db.getCollection(collName);
- coll.drop();
+coll.drop();
- assert.commandWorked(coll.insert({i: 1, j: 1}));
- assert.commandWorked(coll.insert({i: 2, j: 1}));
- assert.commandWorked(coll.insert({i: 2, j: 2}));
+assert.commandWorked(coll.insert({i: 1, j: 1}));
+assert.commandWorked(coll.insert({i: 2, j: 1}));
+assert.commandWorked(coll.insert({i: 2, j: 2}));
- // Base case: Pass a valid query into countDocuments without any extra options.
- assert.eq(1, coll.countDocuments({i: 1}));
- assert.eq(2, coll.countDocuments({i: 2}));
+// Base case: Pass a valid query into countDocuments without any extra options.
+assert.eq(1, coll.countDocuments({i: 1}));
+assert.eq(2, coll.countDocuments({i: 2}));
- // Base case: Call estimatedDocumentCount without any extra options.
- assert.eq(3, coll.estimatedDocumentCount());
+// Base case: Call estimatedDocumentCount without any extra options.
+assert.eq(3, coll.estimatedDocumentCount());
- assert.commandWorked(coll.insert({i: 1, j: 2}));
- assert.commandWorked(coll.insert({i: 1, j: 3}));
- assert.commandWorked(coll.insert({i: 1, j: 4}));
+assert.commandWorked(coll.insert({i: 1, j: 2}));
+assert.commandWorked(coll.insert({i: 1, j: 3}));
+assert.commandWorked(coll.insert({i: 1, j: 4}));
- // Limit case: Limit the number of documents to count. There are 4 {i: 1} documents,
- // but we will set the limit to 3.
- assert.eq(3, coll.countDocuments({i: 1}, {limit: 3}));
+// Limit case: Limit the number of documents to count. There are 4 {i: 1} documents,
+// but we will set the limit to 3.
+assert.eq(3, coll.countDocuments({i: 1}, {limit: 3}));
- // Skip case: Skip a certain number of documents for the count. We will skip 2, meaning
- // that we will have 2 left.
- assert.eq(2, coll.countDocuments({i: 1}, {skip: 2}));
+// Skip case: Skip a certain number of documents for the count. We will skip 2, meaning
+// that we will have 2 left.
+assert.eq(2, coll.countDocuments({i: 1}, {skip: 2}));
- assert.commandWorked(coll.ensureIndex({i: 1}));
+assert.commandWorked(coll.ensureIndex({i: 1}));
- // Aggregate stage case: Add an option that gets added as an aggregation argument.
- assert.eq(4, coll.countDocuments({i: 1}, {hint: {i: 1}}));
+// Aggregate stage case: Add an option that gets added as an aggregation argument.
+assert.eq(4, coll.countDocuments({i: 1}, {hint: {i: 1}}));
- // Set fail point to make sure estimatedDocumentCount times out.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'alwaysOn'}));
+// Set fail point to make sure estimatedDocumentCount times out.
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'alwaysOn'}));
- // maxTimeMS case: Expect an error if an operation times out.
- assert.commandFailedWithCode(assert.throws(function() {
- coll.estimatedDocumentCount({maxTimeMS: 100});
- }),
- ErrorCodes.MaxTimeMSExpired);
+// maxTimeMS case: Expect an error if an operation times out.
+assert.commandFailedWithCode(assert.throws(function() {
+ coll.estimatedDocumentCount({maxTimeMS: 100});
+ }),
+ ErrorCodes.MaxTimeMSExpired);
- // Disable fail point.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'off'}));
-
- MongoRunner.stopMongod(standalone);
+// Disable fail point.
+assert.commandWorked(db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'off'}));
+MongoRunner.stopMongod(standalone);
 })();
\ No newline at end of file
diff --git a/jstests/noPassthrough/drop_connections_replSet.js b/jstests/noPassthrough/drop_connections_replSet.js
index d8e07397afe..b9ec6c93368 100644
--- a/jstests/noPassthrough/drop_connections_replSet.js
+++ b/jstests/noPassthrough/drop_connections_replSet.js
@@ -4,51 +4,51 @@
*/
(function() {
- "use strict";
-
- const rst = new ReplSetTest({nodes: 3});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- rst.awaitSecondaryNodes();
-
- function getConnPoolHosts() {
- const ret = primary.adminCommand({connPoolStats: 1});
- assert.commandWorked(ret);
- jsTestLog("Connection pool stats by host: " + tojson(ret.hosts));
- return ret.hosts;
- }
-
- // To test the dropConnections command, first remove the secondary. This should have no effect
- // on the existing connection pool, but it'll prevent the primary from reconnecting to it after
- // dropConnections. Then, execute dropConnections and check that the primary has 0 connections
- // to the secondary.
- const cfg = primary.getDB('local').system.replset.findOne();
- const memberHost = cfg.members[2].host;
- assert.eq(memberHost in getConnPoolHosts(), true);
-
- const removedMember = cfg.members.splice(2, 1);
- assert.eq(removedMember[0].host, memberHost);
- cfg.version++;
-
- jsTestLog("Reconfiguring to omit " + memberHost);
- assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
-
- // Reconfig did not affect the connection pool
- assert.eq(memberHost in getConnPoolHosts(), true);
-
- // Test dropConnections
- jsTestLog("Dropping connections to " + memberHost);
- assert.commandWorked(primary.adminCommand({dropConnections: 1, hostAndPort: [memberHost]}));
- assert.soon(() => {
- return !(memberHost in getConnPoolHosts());
- });
-
- // Need to re-add removed node, or the test complains about the replset config
- cfg.members.push(removedMember[0]);
- cfg.version++;
- assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
-
- rst.stopSet();
+"use strict";
+
+const rst = new ReplSetTest({nodes: 3});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+rst.awaitSecondaryNodes();
+
+function getConnPoolHosts() {
+ const ret = primary.adminCommand({connPoolStats: 1});
+ assert.commandWorked(ret);
+ jsTestLog("Connection pool stats by host: " + tojson(ret.hosts));
+ return ret.hosts;
+}
+
+// To test the dropConnections command, first remove the secondary. This should have no effect
+// on the existing connection pool, but it'll prevent the primary from reconnecting to it after
+// dropConnections. Then, execute dropConnections and check that the primary has 0 connections
+// to the secondary.
+const cfg = primary.getDB('local').system.replset.findOne();
+const memberHost = cfg.members[2].host;
+assert.eq(memberHost in getConnPoolHosts(), true);
+
+const removedMember = cfg.members.splice(2, 1);
+assert.eq(removedMember[0].host, memberHost);
+cfg.version++;
+
+jsTestLog("Reconfiguring to omit " + memberHost);
+assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
+
+// Reconfig did not affect the connection pool
+assert.eq(memberHost in getConnPoolHosts(), true);
+
+// Test dropConnections
+jsTestLog("Dropping connections to " + memberHost);
+assert.commandWorked(primary.adminCommand({dropConnections: 1, hostAndPort: [memberHost]}));
+assert.soon(() => {
+ return !(memberHost in getConnPoolHosts());
+});
+
+// Need to re-add removed node, or the test complains about the replset config
+cfg.members.push(removedMember[0]);
+cfg.version++;
+assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
+
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/drop_connections_sharded.js b/jstests/noPassthrough/drop_connections_sharded.js
index 7d2f605946e..205992b2dfc 100644
--- a/jstests/noPassthrough/drop_connections_sharded.js
+++ b/jstests/noPassthrough/drop_connections_sharded.js
@@ -4,49 +4,49 @@
*/
(function() {
- "use strict";
-
- const st = new ShardingTest({
- config: {nodes: 1},
- shards: 1,
- rs0: {nodes: 3},
- mongos: 1,
- });
- const mongos = st.s0;
- const rst = st.rs0;
- const primary = rst.getPrimary();
-
- mongos.adminCommand({multicast: {ping: 0}});
-
- function getConnPoolHosts() {
- const ret = mongos.adminCommand({connPoolStats: 1});
- assert.commandWorked(ret);
- jsTestLog("Connection pool stats by host: " + tojson(ret.hosts));
- return ret.hosts;
- }
-
- const cfg = primary.getDB('local').system.replset.findOne();
- const memberHost = cfg.members[2].host;
- assert.eq(memberHost in getConnPoolHosts(), true);
-
- const removedMember = cfg.members.splice(2, 1);
- assert.eq(removedMember[0].host, memberHost);
- cfg.version++;
-
- jsTestLog("Reconfiguring to omit " + memberHost);
- assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
- assert.eq(memberHost in getConnPoolHosts(), true);
-
- jsTestLog("Dropping connections to " + memberHost);
- assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: [memberHost]}));
- assert.soon(() => {
- return !(memberHost in getConnPoolHosts());
- });
-
- // need to re-add removed node or test complain about the replset config
- cfg.members.push(removedMember[0]);
- cfg.version++;
- assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
-
- st.stop();
+"use strict";
+
+const st = new ShardingTest({
+ config: {nodes: 1},
+ shards: 1,
+ rs0: {nodes: 3},
+ mongos: 1,
+});
+const mongos = st.s0;
+const rst = st.rs0;
+const primary = rst.getPrimary();
+
+mongos.adminCommand({multicast: {ping: 0}});
+
+function getConnPoolHosts() {
+ const ret = mongos.adminCommand({connPoolStats: 1});
+ assert.commandWorked(ret);
+ jsTestLog("Connection pool stats by host: " + tojson(ret.hosts));
+ return ret.hosts;
+}
+
+const cfg = primary.getDB('local').system.replset.findOne();
+const memberHost = cfg.members[2].host;
+assert.eq(memberHost in getConnPoolHosts(), true);
+
+const removedMember = cfg.members.splice(2, 1);
+assert.eq(removedMember[0].host, memberHost);
+cfg.version++;
+
+jsTestLog("Reconfiguring to omit " + memberHost);
+assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
+assert.eq(memberHost in getConnPoolHosts(), true);
+
+jsTestLog("Dropping connections to " + memberHost);
+assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: [memberHost]}));
+assert.soon(() => {
+ return !(memberHost in getConnPoolHosts());
+});
+
+// Need to re-add removed node, or the test complains about the replset config
+cfg.members.push(removedMember[0]);
+cfg.version++;
+assert.commandWorked(primary.adminCommand({replSetReconfig: cfg}));
+
+st.stop();
})();
diff --git a/jstests/noPassthrough/drop_view_does_not_take_database_X.js b/jstests/noPassthrough/drop_view_does_not_take_database_X.js
index 02efa840085..69cafb65f58 100644
--- a/jstests/noPassthrough/drop_view_does_not_take_database_X.js
+++ b/jstests/noPassthrough/drop_view_does_not_take_database_X.js
@@ -5,29 +5,29 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/libs/check_log.js");
- const conn = MongoRunner.runMongod({});
- const db = conn.getDB("test");
+const conn = MongoRunner.runMongod({});
+const db = conn.getDB("test");
- assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
- assert.commandWorked(db.createView("view", "a", []));
+assert.commandWorked(db.runCommand({insert: "a", documents: [{x: 1}]}));
+assert.commandWorked(db.createView("view", "a", []));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "hangDuringDropCollection", mode: "alwaysOn"}));
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangDuringDropCollection", mode: "alwaysOn"}));
- // This only holds a database IX lock.
- const awaitDrop =
- startParallelShell(() => assert(db.getSiblingDB("test")["view"].drop()), conn.port);
- checkLog.contains(conn, "hangDuringDropCollection fail point enabled");
+// This only holds a database IX lock.
+const awaitDrop =
+ startParallelShell(() => assert(db.getSiblingDB("test")["view"].drop()), conn.port);
+checkLog.contains(conn, "hangDuringDropCollection fail point enabled");
- // This takes a database IX lock and should not be blocked.
- assert.commandWorked(db.runCommand({insert: "a", documents: [{y: 1}]}));
+// This takes a database IX lock and should not be blocked.
+assert.commandWorked(db.runCommand({insert: "a", documents: [{y: 1}]}));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "hangDuringDropCollection", mode: "off"}));
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangDuringDropCollection", mode: "off"}));
- awaitDrop();
- MongoRunner.stopMongod(conn);
+awaitDrop();
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/dropcollection_duplicate_fields.js b/jstests/noPassthrough/dropcollection_duplicate_fields.js
index a2a1c1c8839..22e7c3fdeeb 100644
--- a/jstests/noPassthrough/dropcollection_duplicate_fields.js
+++ b/jstests/noPassthrough/dropcollection_duplicate_fields.js
@@ -4,26 +4,25 @@
*/
(function() {
- "use strict";
- var conn = MongoRunner.runMongod();
- var db = conn.getDB('test');
+"use strict";
+var conn = MongoRunner.runMongod();
+var db = conn.getDB('test');
- let coll = db.dropcollection_duplicate_fields;
- // Repeat 100 times for the sake of probabilities
- for (let i = 0; i < 100; i++) {
- coll.drop();
- coll.insert({x: 1});
+let coll = db.dropcollection_duplicate_fields;
+// Repeat 100 times for the sake of probabilities
+for (let i = 0; i < 100; i++) {
+ coll.drop();
+ coll.insert({x: 1});
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: 'WTWriteConflictException', mode: {activationProbability: 0.1}}));
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: 'WTWriteConflictException', mode: {activationProbability: 0.1}}));
- // will blow up if res is not valid
- let res = db.runCommand({drop: 'dropcollection_duplicate_fields'});
+ // will blow up if res is not valid
+ let res = db.runCommand({drop: 'dropcollection_duplicate_fields'});
- assert.commandWorked(
- db.adminCommand({configureFailPoint: 'WTWriteConflictException', mode: "off"}));
- }
-
- MongoRunner.stopMongod(conn);
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: 'WTWriteConflictException', mode: "off"}));
+}
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/dropdatabase_respect_maxtimems.js b/jstests/noPassthrough/dropdatabase_respect_maxtimems.js
index 972d21c6e24..db93575c993 100644
--- a/jstests/noPassthrough/dropdatabase_respect_maxtimems.js
+++ b/jstests/noPassthrough/dropdatabase_respect_maxtimems.js
@@ -3,62 +3,62 @@
* @tags: [requires_replication, uses_transactions]
*/
(function() {
- const rst = ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const adminDB = rst.getPrimary().getDB("admin");
- const txnDB = rst.getPrimary().getDB("txn");
- const dropDB = rst.getPrimary().getDB("drop");
+const adminDB = rst.getPrimary().getDB("admin");
+const txnDB = rst.getPrimary().getDB("txn");
+const dropDB = rst.getPrimary().getDB("drop");
- (function assertColletionDropCanBeInterrupted() {
- assert.commandWorked(txnDB.foo.insert({}));
- assert.commandWorked(dropDB.bar.insert({}));
- const session = txnDB.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase("txn");
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.insert({}));
- assert.commandFailedWithCode(dropDB.runCommand({dropDatabase: 1, maxTimeMS: 100}),
- ErrorCodes.MaxTimeMSExpired);
+(function assertCollectionDropCanBeInterrupted() {
+ assert.commandWorked(txnDB.foo.insert({}));
+ assert.commandWorked(dropDB.bar.insert({}));
+ const session = txnDB.getMongo().startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase("txn");
+ session.startTransaction();
+ assert.commandWorked(sessionDB.foo.insert({}));
+ assert.commandFailedWithCode(dropDB.runCommand({dropDatabase: 1, maxTimeMS: 100}),
+ ErrorCodes.MaxTimeMSExpired);
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- })();
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+})();
- (function assertDatabaseDropCanBeInterrupted() {
- assert.commandWorked(txnDB.foo.insert({}));
- assert.commandWorked(dropDB.bar.insert({}));
+(function assertDatabaseDropCanBeInterrupted() {
+ assert.commandWorked(txnDB.foo.insert({}));
+ assert.commandWorked(dropDB.bar.insert({}));
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "dropDatabaseHangAfterAllCollectionsDrop", mode: "alwaysOn"}));
+ assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "dropDatabaseHangAfterAllCollectionsDrop", mode: "alwaysOn"}));
- // This will get blocked by the failpoint when collection drop phase finishes.
- let dropDatabaseShell = startParallelShell(
- "assert.commandFailedWithCode(db.getSiblingDB(\"drop\").runCommand({dropDatabase: 1, maxTimeMS: 5000}), ErrorCodes.MaxTimeMSExpired);",
- rst.getPrimary().port);
+ // This will get blocked by the failpoint when collection drop phase finishes.
+ let dropDatabaseShell = startParallelShell(
+ "assert.commandFailedWithCode(db.getSiblingDB(\"drop\").runCommand({dropDatabase: 1, maxTimeMS: 5000}), ErrorCodes.MaxTimeMSExpired);",
+ rst.getPrimary().port);
- assert.soon(function() {
- const sessionFilter = {active: true, "command.dropDatabase": 1};
- const res = adminDB.aggregate([{$currentOp: {}}, {$match: sessionFilter}]);
- return res.hasNext();
- }, "Timeout waiting for dropDatabase to start");
+ assert.soon(function() {
+ const sessionFilter = {active: true, "command.dropDatabase": 1};
+ const res = adminDB.aggregate([{$currentOp: {}}, {$match: sessionFilter}]);
+ return res.hasNext();
+ }, "Timeout waiting for dropDatabase to start");
- const session = txnDB.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase("txn");
- session.startTransaction();
- assert.commandWorked(sessionDB.foo.insert({}));
+ const session = txnDB.getMongo().startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase("txn");
+ session.startTransaction();
+ assert.commandWorked(sessionDB.foo.insert({}));
- // dropDatabase now gets unblocked by the failpoint but will immediately
- // get blocked by acquiring the GlobalWrite lock for dropping the database.
- assert.commandWorked(rst.getPrimary().adminCommand(
- {configureFailPoint: "dropDatabaseHangAfterAllCollectionsDrop", mode: "off"}));
+ // dropDatabase now gets unblocked by the failpoint but will immediately
+ // get blocked by acquiring the GlobalWrite lock for dropping the database.
+ assert.commandWorked(rst.getPrimary().adminCommand(
+ {configureFailPoint: "dropDatabaseHangAfterAllCollectionsDrop", mode: "off"}));
- // This should timeout.
- dropDatabaseShell();
+ // This should timeout.
+ dropDatabaseShell();
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- })();
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+})();
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/durable_view_catalog.js b/jstests/noPassthrough/durable_view_catalog.js
index 06702fa5acd..23de01b4b30 100644
--- a/jstests/noPassthrough/durable_view_catalog.js
+++ b/jstests/noPassthrough/durable_view_catalog.js
@@ -5,93 +5,84 @@
* @tags: [requires_persistence]
*/
(function() {
- 'use strict';
+'use strict';
- // The following test verifies that writeConcern: {j: true} ensures that the view catalog is
- // durable.
- let dbpath = MongoRunner.dataPath + '_durable_view_catalog';
- resetDbpath(dbpath);
+// The following test verifies that writeConcern: {j: true} ensures that the view catalog is
+// durable.
+let dbpath = MongoRunner.dataPath + '_durable_view_catalog';
+resetDbpath(dbpath);
- let mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
+let mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
- // Start a mongod.
- let conn = MongoRunner.runMongod(mongodArgs);
- assert.neq(null, conn, 'mongod was unable to start up');
+// Start a mongod.
+let conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(null, conn, 'mongod was unable to start up');
- // Now connect to the mongod, create, remove and modify views and then abruptly stop the server.
- let viewsDB = conn.getDB('test');
- let pipe = [{$match: {}}];
- assert.commandWorked(
- viewsDB.runCommand({create: "view1", viewOn: "collection", pipeline: pipe}));
- assert.commandWorked(
- viewsDB.runCommand({create: "view2", viewOn: "collection", pipeline: pipe}));
- assert.commandWorked(
- viewsDB.runCommand({create: "view3", viewOn: "collection", pipeline: pipe}));
- assert.commandWorked(viewsDB.runCommand({collMod: "view3", viewOn: "view2"}));
- // On the final modification, require a sync to ensure durability.
- assert.commandWorked(viewsDB.runCommand({drop: "view1", writeConcern: {j: 1}}));
+// Now connect to the mongod, create, remove and modify views and then abruptly stop the server.
+let viewsDB = conn.getDB('test');
+let pipe = [{$match: {}}];
+assert.commandWorked(viewsDB.runCommand({create: "view1", viewOn: "collection", pipeline: pipe}));
+assert.commandWorked(viewsDB.runCommand({create: "view2", viewOn: "collection", pipeline: pipe}));
+assert.commandWorked(viewsDB.runCommand({create: "view3", viewOn: "collection", pipeline: pipe}));
+assert.commandWorked(viewsDB.runCommand({collMod: "view3", viewOn: "view2"}));
+// On the final modification, require a sync to ensure durability.
+assert.commandWorked(viewsDB.runCommand({drop: "view1", writeConcern: {j: 1}}));
- // Hard kill the mongod to ensure the data was indeed synced to durable storage.
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+// Hard kill the mongod to ensure the data was indeed synced to durable storage.
+MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- // Restart the mongod.
- conn = MongoRunner.runMongod(mongodArgs);
- assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
+// Restart the mongod.
+conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
- // Check that our journaled write still is present.
- viewsDB = conn.getDB('test');
- let actualViews = viewsDB.system.views.find().toArray();
- let expectedViews = [
- {"_id": "test.view2", "viewOn": "collection", "pipeline": pipe},
- {"_id": "test.view3", "viewOn": "view2", "pipeline": pipe}
- ];
- assert.eq(actualViews, expectedViews, "view definitions not correctly persisted");
- let listedViews = viewsDB.runCommand({listCollections: 1, filter: {type: "view"}})
- .cursor.firstBatch.map((function(x) {
- return {
- _id: "test." + x.name,
- viewOn: x.options.viewOn,
- pipeline: x.options.pipeline
- };
- }));
- assert.sameMembers(
- listedViews, expectedViews, "persisted view definitions not correctly loaded");
+// Check that our journaled write still is present.
+viewsDB = conn.getDB('test');
+let actualViews = viewsDB.system.views.find().toArray();
+let expectedViews = [
+ {"_id": "test.view2", "viewOn": "collection", "pipeline": pipe},
+ {"_id": "test.view3", "viewOn": "view2", "pipeline": pipe}
+];
+assert.eq(actualViews, expectedViews, "view definitions not correctly persisted");
+let listedViews =
+ viewsDB.runCommand({listCollections: 1, filter: {type: "view"}})
+ .cursor.firstBatch.map((function(x) {
+ return {_id: "test." + x.name, viewOn: x.options.viewOn, pipeline: x.options.pipeline};
+ }));
+assert.sameMembers(listedViews, expectedViews, "persisted view definitions not correctly loaded");
- // Insert an invalid view definition directly into system.views to bypass normal validation.
- assert.writeOK(viewsDB.system.views.insert({_id: "badView", pipeline: "badType"}));
+// Insert an invalid view definition directly into system.views to bypass normal validation.
+assert.writeOK(viewsDB.system.views.insert({_id: "badView", pipeline: "badType"}));
- // Skip collection validation during stopMongod if invalid views exists.
- TestData.skipValidationOnInvalidViewDefinitions = true;
+// Skip collection validation during stopMongod if invalid views exists.
+TestData.skipValidationOnInvalidViewDefinitions = true;
- // Restarting the mongod should succeed despite the presence of invalid view definitions.
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod(mongodArgs);
- assert.neq(
- null,
- conn,
- "after inserting bad views, failed to restart mongod with options: " + tojson(mongodArgs));
+// Restarting the mongod should succeed despite the presence of invalid view definitions.
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(
+ null,
+ conn,
+ "after inserting bad views, failed to restart mongod with options: " + tojson(mongodArgs));
- // Now that the database's view catalog has been marked as invalid, all view operations in that
- // database should fail.
- viewsDB = conn.getDB("test");
- assert.commandFailedWithCode(viewsDB.runCommand({find: "view2"}),
- ErrorCodes.InvalidViewDefinition);
- assert.commandFailedWithCode(viewsDB.runCommand({create: "view4", viewOn: "collection"}),
- ErrorCodes.InvalidViewDefinition);
- assert.commandFailedWithCode(viewsDB.runCommand({collMod: "view2", viewOn: "view4"}),
- ErrorCodes.InvalidViewDefinition);
- assert.commandFailedWithCode(viewsDB.runCommand({drop: "view4"}),
- ErrorCodes.InvalidViewDefinition);
- assert.commandFailedWithCode(viewsDB.runCommand({listCollections: 1}),
- ErrorCodes.InvalidViewDefinition);
+// Now that the database's view catalog has been marked as invalid, all view operations in that
+// database should fail.
+viewsDB = conn.getDB("test");
+assert.commandFailedWithCode(viewsDB.runCommand({find: "view2"}), ErrorCodes.InvalidViewDefinition);
+assert.commandFailedWithCode(viewsDB.runCommand({create: "view4", viewOn: "collection"}),
+ ErrorCodes.InvalidViewDefinition);
+assert.commandFailedWithCode(viewsDB.runCommand({collMod: "view2", viewOn: "view4"}),
+ ErrorCodes.InvalidViewDefinition);
+assert.commandFailedWithCode(viewsDB.runCommand({drop: "view4"}), ErrorCodes.InvalidViewDefinition);
+assert.commandFailedWithCode(viewsDB.runCommand({listCollections: 1}),
+ ErrorCodes.InvalidViewDefinition);
- // Manually remove the invalid view definition from system.views, and then verify that view
- // operations work successfully without requiring a server restart.
- assert.writeOK(viewsDB.system.views.remove({_id: "badView"}));
- assert.commandWorked(viewsDB.runCommand({find: "view2"}));
- assert.commandWorked(viewsDB.runCommand({create: "view4", viewOn: "collection"}));
- assert.commandWorked(viewsDB.runCommand({collMod: "view2", viewOn: "view4"}));
- assert.commandWorked(viewsDB.runCommand({drop: "view4"}));
- assert.commandWorked(viewsDB.runCommand({listCollections: 1}));
- MongoRunner.stopMongod(conn);
+// Manually remove the invalid view definition from system.views, and then verify that view
+// operations work successfully without requiring a server restart.
+assert.writeOK(viewsDB.system.views.remove({_id: "badView"}));
+assert.commandWorked(viewsDB.runCommand({find: "view2"}));
+assert.commandWorked(viewsDB.runCommand({create: "view4", viewOn: "collection"}));
+assert.commandWorked(viewsDB.runCommand({collMod: "view2", viewOn: "view4"}));
+assert.commandWorked(viewsDB.runCommand({drop: "view4"}));
+assert.commandWorked(viewsDB.runCommand({listCollections: 1}));
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/end_sessions_command.js b/jstests/noPassthrough/end_sessions_command.js
index 4999410e953..3f32d95c42f 100644
--- a/jstests/noPassthrough/end_sessions_command.js
+++ b/jstests/noPassthrough/end_sessions_command.js
@@ -1,93 +1,92 @@
(function() {
- "use script";
-
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
-
- var res;
- var refresh = {refreshLogicalSessionCacheNow: 1};
- var startSession = {startSession: 1};
-
- // Start up a standalone server.
- var conn = MongoRunner.runMongod();
- var admin = conn.getDB("admin");
- var config = conn.getDB("config");
-
- // Trigger an initial refresh, as a sanity check.
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- var sessions = [];
- for (var i = 0; i < 20; i++) {
- res = admin.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- sessions.push(res);
- }
-
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- assert.eq(config.system.sessions.count(), 20, "refresh should have written 20 session records");
-
- var endSessionsIds = [];
- for (var i = 0; i < 10; i++) {
- endSessionsIds.push(sessions[i].id);
- }
- res = admin.runCommand({endSessions: endSessionsIds});
- assert.commandWorked(res, "failed to end sessions");
-
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- assert.eq(config.system.sessions.count(),
- 10,
- "endSessions and refresh should result in 10 remaining sessions");
-
- // double delete the remaining 10
- endSessionsIds = [];
- for (var i = 10; i < 20; i++) {
- endSessionsIds.push(sessions[i].id);
- endSessionsIds.push(sessions[i].id);
- }
-
- res = admin.runCommand({endSessions: endSessionsIds});
- assert.commandWorked(res, "failed to end sessions");
-
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
+"use script";
+
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
+
+var res;
+var refresh = {refreshLogicalSessionCacheNow: 1};
+var startSession = {startSession: 1};
+
+// Start up a standalone server.
+var conn = MongoRunner.runMongod();
+var admin = conn.getDB("admin");
+var config = conn.getDB("config");
+
+// Trigger an initial refresh, as a sanity check.
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+var sessions = [];
+for (var i = 0; i < 20; i++) {
+ res = admin.runCommand(startSession);
+ assert.commandWorked(res, "unable to start session");
+ sessions.push(res);
+}
+
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+assert.eq(config.system.sessions.count(), 20, "refresh should have written 20 session records");
+
+var endSessionsIds = [];
+for (var i = 0; i < 10; i++) {
+ endSessionsIds.push(sessions[i].id);
+}
+res = admin.runCommand({endSessions: endSessionsIds});
+assert.commandWorked(res, "failed to end sessions");
+
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+assert.eq(config.system.sessions.count(),
+ 10,
+ "endSessions and refresh should result in 10 remaining sessions");
+
+// double delete the remaining 10
+endSessionsIds = [];
+for (var i = 10; i < 20; i++) {
+ endSessionsIds.push(sessions[i].id);
+ endSessionsIds.push(sessions[i].id);
+}
+
+res = admin.runCommand({endSessions: endSessionsIds});
+assert.commandWorked(res, "failed to end sessions");
+
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+assert.eq(config.system.sessions.count(),
+ 0,
+ "endSessions and refresh should result in 0 remaining sessions");
+
+// delete some sessions that were never created
+res = admin.runCommand({
+ endSessions: [
+ {"id": UUID("bacb219c-214c-47f9-a94a-6c7f434b3bae")},
+ {"id": UUID("bacb219c-214c-47f9-a94a-6c7f434b3baf")}
+ ]
+});
+
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+// verify that end on the session handle actually ends sessions
+{
+ var session = conn.startSession();
+
+ assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
+ "do something to tickle the session");
+ assert.commandWorked(session.getDatabase("admin").runCommand(refresh), "failed to refresh");
+ assert.eq(config.system.sessions.count(), 1, "usersInfo should have written 1 session record");
+
+ session.endSession();
+ assert.commandWorked(admin.runCommand(refresh), "failed to refresh");
assert.eq(config.system.sessions.count(),
0,
"endSessions and refresh should result in 0 remaining sessions");
+}
- // delete some sessions that were never created
- res = admin.runCommand({
- endSessions: [
- {"id": UUID("bacb219c-214c-47f9-a94a-6c7f434b3bae")},
- {"id": UUID("bacb219c-214c-47f9-a94a-6c7f434b3baf")}
- ]
- });
-
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- // verify that end on the session handle actually ends sessions
- {
- var session = conn.startSession();
-
- assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
- "do something to tickle the session");
- assert.commandWorked(session.getDatabase("admin").runCommand(refresh), "failed to refresh");
- assert.eq(
- config.system.sessions.count(), 1, "usersInfo should have written 1 session record");
-
- session.endSession();
- assert.commandWorked(admin.runCommand(refresh), "failed to refresh");
- assert.eq(config.system.sessions.count(),
- 0,
- "endSessions and refresh should result in 0 remaining sessions");
- }
-
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/exchange_in_session.js b/jstests/noPassthrough/exchange_in_session.js
index 20261c0c081..b78d45d27be 100644
--- a/jstests/noPassthrough/exchange_in_session.js
+++ b/jstests/noPassthrough/exchange_in_session.js
@@ -5,81 +5,81 @@
* @tags: [requires_sharding, uses_transactions]
*/
(function() {
- // This test manually simulates a session, which is not compatible with implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test manually simulates a session, which is not compatible with implicit sessions.
+TestData.disableImplicitSessions = true;
- // Start a sharded cluster. For this test, we'll just need to talk to the shard directly.
- const st = new ShardingTest({shards: 1, mongos: 1});
+// Start a sharded cluster. For this test, we'll just need to talk to the shard directly.
+const st = new ShardingTest({shards: 1, mongos: 1});
- const adminDB = st.shard0.getDB("admin");
- const session = st.shard0.getDB("test").getMongo().startSession();
- const shardDB = session.getDatabase("test");
- const coll = shardDB.exchange_in_session;
+const adminDB = st.shard0.getDB("admin");
+const session = st.shard0.getDB("test").getMongo().startSession();
+const shardDB = session.getDatabase("test");
+const coll = shardDB.exchange_in_session;
- let bigString = '';
- for (let i = 0; i < 20; i++) {
- bigString += 's';
- }
+let bigString = '';
+for (let i = 0; i < 20; i++) {
+ bigString += 's';
+}
- // Insert some documents.
- const nDocs = 50;
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({_id: i, bigString: bigString}));
- }
+// Insert some documents.
+const nDocs = 50;
+for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({_id: i, bigString: bigString}));
+}
- session.startTransaction();
+session.startTransaction();
- // Set up an Exchange with two cursors.
- let res = assert.commandWorked(shardDB.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- exchange: {
- policy: 'keyRange',
- consumers: NumberInt(2),
- key: {_id: 1},
- boundaries: [{a: MinKey}, {a: nDocs / 2}, {a: MaxKey}],
- consumerIds: [NumberInt(0), NumberInt(1)],
- bufferSize: NumberInt(128)
- },
- cursor: {batchSize: 0},
- }));
+// Set up an Exchange with two cursors.
+let res = assert.commandWorked(shardDB.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [],
+ exchange: {
+ policy: 'keyRange',
+ consumers: NumberInt(2),
+ key: {_id: 1},
+ boundaries: [{a: MinKey}, {a: nDocs / 2}, {a: MaxKey}],
+ consumerIds: [NumberInt(0), NumberInt(1)],
+ bufferSize: NumberInt(128)
+ },
+ cursor: {batchSize: 0},
+}));
- function spawnShellToIterateCursor(cursorId) {
- let code = `const cursor = ${tojson(cursorId)};`;
- code += `const sessionId = ${tojson(session.getSessionId())};`;
- code += `const collName = "${coll.getName()}";`;
- function iterateCursorWithNoDocs() {
- const getMoreCmd = {
- getMore: cursor.id,
- collection: collName,
- batchSize: 4,
- lsid: sessionId,
- txnNumber: NumberLong(0),
- autocommit: false
- };
+function spawnShellToIterateCursor(cursorId) {
+ let code = `const cursor = ${tojson(cursorId)};`;
+ code += `const sessionId = ${tojson(session.getSessionId())};`;
+ code += `const collName = "${coll.getName()}";`;
+ function iterateCursorWithNoDocs() {
+ const getMoreCmd = {
+ getMore: cursor.id,
+ collection: collName,
+ batchSize: 4,
+ lsid: sessionId,
+ txnNumber: NumberLong(0),
+ autocommit: false
+ };
- let resp = null;
- while (!resp || resp.cursor.id != 0) {
- resp = assert.commandWorked(db.runCommand(getMoreCmd));
- }
+ let resp = null;
+ while (!resp || resp.cursor.id != 0) {
+ resp = assert.commandWorked(db.runCommand(getMoreCmd));
}
- code += `(${iterateCursorWithNoDocs.toString()})();`;
- return startParallelShell(code, st.rs0.getPrimary().port);
}
+ code += `(${iterateCursorWithNoDocs.toString()})();`;
+ return startParallelShell(code, st.rs0.getPrimary().port);
+}
- let parallelShells = [];
- for (let curs of res.cursors) {
- parallelShells.push(spawnShellToIterateCursor(curs.cursor));
- }
+let parallelShells = [];
+for (let curs of res.cursors) {
+ parallelShells.push(spawnShellToIterateCursor(curs.cursor));
+}
- assert.soon(function() {
- for (let waitFn of parallelShells) {
- waitFn();
- }
- return true;
- });
+assert.soon(function() {
+ for (let waitFn of parallelShells) {
+ waitFn();
+ }
+ return true;
+});
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/exhaust_option_disallowed_in_session.js b/jstests/noPassthrough/exhaust_option_disallowed_in_session.js
index 983a0f90682..1ba1014dc2f 100644
--- a/jstests/noPassthrough/exhaust_option_disallowed_in_session.js
+++ b/jstests/noPassthrough/exhaust_option_disallowed_in_session.js
@@ -2,31 +2,31 @@
* Make sure the 'exhaust' query option is not able to be used in a session.
*/
(function() {
- "use strict";
+"use strict";
- let conn = MongoRunner.runMongod();
+let conn = MongoRunner.runMongod();
- const dbName = 'test';
- const collName = 'coll';
+const dbName = 'test';
+const collName = 'coll';
- const session = conn.startSession();
- const sessionColl = session.getDatabase(dbName).getCollection(collName);
- const testColl = conn.getDB(dbName).getCollection(collName);
+const session = conn.startSession();
+const sessionColl = session.getDatabase(dbName).getCollection(collName);
+const testColl = conn.getDB(dbName).getCollection(collName);
- testColl.drop();
+testColl.drop();
- // Create a collection to query.
- assert.commandWorked(testColl.insert({_id: 1}));
+// Create a collection to query.
+assert.commandWorked(testColl.insert({_id: 1}));
- // Exhaust outside of session should work.
- let docs = testColl.find().addOption(DBQuery.Option.exhaust).toArray();
- assert.docEq([{_id: 1}], docs);
+// Exhaust outside of session should work.
+let docs = testColl.find().addOption(DBQuery.Option.exhaust).toArray();
+assert.docEq([{_id: 1}], docs);
- // Exhaust in session should fail.
- assert.throws(() => {
- sessionColl.find().addOption(DBQuery.Option.exhaust).toArray();
- });
+// Exhaust in session should fail.
+assert.throws(() => {
+ sessionColl.find().addOption(DBQuery.Option.exhaust).toArray();
+});
- session.endSession();
- MongoRunner.stopMongod(conn);
+session.endSession();
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/exit_logging.js b/jstests/noPassthrough/exit_logging.js
index 4e6be5c811f..168f63a4579 100644
--- a/jstests/noPassthrough/exit_logging.js
+++ b/jstests/noPassthrough/exit_logging.js
@@ -5,105 +5,104 @@
(function() {
- function makeShutdownByCrashFn(crashHow) {
- return function(conn) {
- var admin = conn.getDB("admin");
- assert.commandWorked(admin.runCommand(
- {configureFailPoint: "crashOnShutdown", mode: "alwaysOn", data: {how: crashHow}}));
- admin.shutdownServer();
- };
- }
-
- function makeRegExMatchFn(pattern) {
- return function(text) {
- return pattern.test(text);
- };
- }
-
- function testShutdownLogging(launcher, crashFn, matchFn, expectedExitCode) {
- clearRawMongoProgramOutput();
- var conn = launcher.start({});
-
- function checkOutput() {
- var logContents = rawMongoProgramOutput();
- function printLog() {
- // We can't just return a string because it will be well over the max
- // line length.
- // So we just print manually.
- print("================ BEGIN LOG CONTENTS ==================");
- logContents.split(/\n/).forEach((line) => {
- print(line);
- });
- print("================ END LOG CONTENTS =====================");
- return "";
- }
-
- assert(matchFn(logContents), printLog);
+function makeShutdownByCrashFn(crashHow) {
+ return function(conn) {
+ var admin = conn.getDB("admin");
+ assert.commandWorked(admin.runCommand(
+ {configureFailPoint: "crashOnShutdown", mode: "alwaysOn", data: {how: crashHow}}));
+ admin.shutdownServer();
+ };
+}
+
+function makeRegExMatchFn(pattern) {
+ return function(text) {
+ return pattern.test(text);
+ };
+}
+
+function testShutdownLogging(launcher, crashFn, matchFn, expectedExitCode) {
+ clearRawMongoProgramOutput();
+ var conn = launcher.start({});
+
+ function checkOutput() {
+ var logContents = rawMongoProgramOutput();
+ function printLog() {
+ // We can't just return a string because it will be well over the max
+ // line length.
+ // So we just print manually.
+ print("================ BEGIN LOG CONTENTS ==================");
+ logContents.split(/\n/).forEach((line) => {
+ print(line);
+ });
+ print("================ END LOG CONTENTS =====================");
+ return "";
}
- crashFn(conn);
- launcher.stop(conn, undefined, {allowedExitCode: expectedExitCode});
- checkOutput();
- }
-
- function runAllTests(launcher) {
- const SIGSEGV = 11;
- const SIGABRT = 6;
- testShutdownLogging(launcher, function(conn) {
- conn.getDB('admin').shutdownServer();
- }, makeRegExMatchFn(/shutdown command received/), MongoRunner.EXIT_CLEAN);
-
- testShutdownLogging(launcher,
- makeShutdownByCrashFn('fault'),
- makeRegExMatchFn(/Invalid access at address[\s\S]*printStackTrace/),
- -SIGSEGV);
-
- testShutdownLogging(launcher,
- makeShutdownByCrashFn('abort'),
- makeRegExMatchFn(/Got signal[\s\S]*printStackTrace/),
- -SIGABRT);
- }
-
- if (_isWindows()) {
- print("SKIPPING TEST ON WINDOWS");
- return;
+ assert(matchFn(logContents), printLog);
}
- if (_isAddressSanitizerActive()) {
- print("SKIPPING TEST ON ADDRESS SANITIZER BUILD");
- return;
- }
-
- (function testMongod() {
- print("********************\nTesting exit logging in mongod\n********************");
-
- runAllTests({
- start: function(opts) {
- var actualOpts = {nojournal: ""};
- Object.extend(actualOpts, opts);
- return MongoRunner.runMongod(actualOpts);
- },
-
- stop: MongoRunner.stopMongod
- });
- }());
-
- (function testMongos() {
- print("********************\nTesting exit logging in mongos\n********************");
+ crashFn(conn);
+ launcher.stop(conn, undefined, {allowedExitCode: expectedExitCode});
+ checkOutput();
+}
+
+function runAllTests(launcher) {
+ const SIGSEGV = 11;
+ const SIGABRT = 6;
+ testShutdownLogging(launcher, function(conn) {
+ conn.getDB('admin').shutdownServer();
+ }, makeRegExMatchFn(/shutdown command received/), MongoRunner.EXIT_CLEAN);
+
+ testShutdownLogging(launcher,
+ makeShutdownByCrashFn('fault'),
+ makeRegExMatchFn(/Invalid access at address[\s\S]*printStackTrace/),
+ -SIGSEGV);
+
+ testShutdownLogging(launcher,
+ makeShutdownByCrashFn('abort'),
+ makeRegExMatchFn(/Got signal[\s\S]*printStackTrace/),
+ -SIGABRT);
+}
+
+if (_isWindows()) {
+ print("SKIPPING TEST ON WINDOWS");
+ return;
+}
+
+if (_isAddressSanitizerActive()) {
+ print("SKIPPING TEST ON ADDRESS SANITIZER BUILD");
+ return;
+}
+
+(function testMongod() {
+ print("********************\nTesting exit logging in mongod\n********************");
+
+ runAllTests({
+ start: function(opts) {
+ var actualOpts = {nojournal: ""};
+ Object.extend(actualOpts, opts);
+ return MongoRunner.runMongod(actualOpts);
+ },
+
+ stop: MongoRunner.stopMongod
+ });
+}());
- var st = new ShardingTest({shards: 1});
- var mongosLauncher = {
- start: function(opts) {
- var actualOpts = {configdb: st._configDB};
- Object.extend(actualOpts, opts);
- return MongoRunner.runMongos(actualOpts);
- },
+(function testMongos() {
+ print("********************\nTesting exit logging in mongos\n********************");
- stop: MongoRunner.stopMongos
- };
+ var st = new ShardingTest({shards: 1});
+ var mongosLauncher = {
+ start: function(opts) {
+ var actualOpts = {configdb: st._configDB};
+ Object.extend(actualOpts, opts);
+ return MongoRunner.runMongos(actualOpts);
+ },
- runAllTests(mongosLauncher);
- st.stop();
- }());
+ stop: MongoRunner.stopMongos
+ };
+ runAllTests(mongosLauncher);
+ st.stop();
+}());
}());
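Condensed, the mongod path through testShutdownLogging amounts to the following sketch,
assuming a POSIX platform where an abort() death is reported to the harness as exit code
-SIGABRT (-6):

    clearRawMongoProgramOutput();
    const conn = MongoRunner.runMongod({});
    const admin = conn.getDB("admin");
    assert.commandWorked(admin.runCommand(
        {configureFailPoint: "crashOnShutdown", mode: "alwaysOn", data: {how: "abort"}}));
    admin.shutdownServer();
    // The crash is expected, so tell the harness which non-zero exit code to tolerate.
    MongoRunner.stopMongod(conn, undefined, {allowedExitCode: -6});
    assert(/Got signal[\s\S]*printStackTrace/.test(rawMongoProgramOutput()));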
diff --git a/jstests/noPassthrough/failcommand_failpoint_not_parallel.js b/jstests/noPassthrough/failcommand_failpoint_not_parallel.js
index 2241dbe5d89..a9d776d0079 100644
--- a/jstests/noPassthrough/failcommand_failpoint_not_parallel.js
+++ b/jstests/noPassthrough/failcommand_failpoint_not_parallel.js
@@ -1,24 +1,23 @@
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn);
- const db = conn.getDB("test_failcommand_noparallel");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn);
+const db = conn.getDB("test_failcommand_noparallel");
- // Test times when closing connection.
- assert.commandWorked(db.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 2},
- data: {
- closeConnection: true,
- failCommands: ["find"],
- }
- }));
- assert.throws(() => db.runCommand({find: "c"}));
- assert.throws(() => db.runCommand({find: "c"}));
- assert.commandWorked(db.runCommand({find: "c"}));
- assert.commandWorked(db.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- MongoRunner.stopMongod(conn);
+// Test times when closing connection.
+assert.commandWorked(db.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 2},
+ data: {
+ closeConnection: true,
+ failCommands: ["find"],
+ }
+}));
+assert.throws(() => db.runCommand({find: "c"}));
+assert.throws(() => db.runCommand({find: "c"}));
+assert.commandWorked(db.runCommand({find: "c"}));
+assert.commandWorked(db.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+MongoRunner.stopMongod(conn);
}());
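The failCommand fail point is used here in its closeConnection form for a fixed number of
matching commands; its data document also accepts an error-code form in servers of this
vintage. A hedged sketch (the "ping" command and error code are chosen for illustration):

    assert.commandWorked(db.adminCommand({
        configureFailPoint: "failCommand",
        mode: {times: 1},
        data: {errorCode: ErrorCodes.CommandFailed, failCommands: ["ping"]}
    }));
    assert.commandFailedWithCode(db.runCommand({ping: 1}), ErrorCodes.CommandFailed);
    assert.commandWorked(db.adminCommand({configureFailPoint: "failCommand", mode: "off"}));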
diff --git a/jstests/noPassthrough/feature_compatibility_version.js b/jstests/noPassthrough/feature_compatibility_version.js
index 64cd6a3a5f7..4f7cd42f450 100644
--- a/jstests/noPassthrough/feature_compatibility_version.js
+++ b/jstests/noPassthrough/feature_compatibility_version.js
@@ -2,55 +2,55 @@
// the value of the featureCompatibilityVersion server parameter.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/feature_compatibility_version.js");
+load("jstests/libs/feature_compatibility_version.js");
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
- let adminDB = conn.getDB("admin");
+let adminDB = conn.getDB("admin");
- // Initially the featureCompatibilityVersion is latestFCV.
- checkFCV(adminDB, latestFCV);
+// Initially the featureCompatibilityVersion is latestFCV.
+checkFCV(adminDB, latestFCV);
- // Updating the featureCompatibilityVersion document changes the featureCompatibilityVersion
- // server parameter.
- assert.writeOK(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV}}));
- checkFCV(adminDB, lastStableFCV);
+// Updating the featureCompatibilityVersion document changes the featureCompatibilityVersion
+// server parameter.
+assert.writeOK(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV}}));
+checkFCV(adminDB, lastStableFCV);
- assert.writeOK(
- adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV, targetVersion: latestFCV}}));
- checkFCV(adminDB, lastStableFCV, latestFCV);
+assert.writeOK(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV, targetVersion: latestFCV}}));
+checkFCV(adminDB, lastStableFCV, latestFCV);
- assert.writeOK(adminDB.system.version.update(
- {_id: "featureCompatibilityVersion"},
- {$set: {version: lastStableFCV, targetVersion: lastStableFCV}}));
- checkFCV(adminDB, lastStableFCV, lastStableFCV);
+assert.writeOK(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: lastStableFCV, targetVersion: lastStableFCV}}));
+checkFCV(adminDB, lastStableFCV, lastStableFCV);
- assert.writeOK(
- adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: latestFCV}, $unset: {targetVersion: true}}));
- checkFCV(adminDB, latestFCV);
+assert.writeOK(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {version: latestFCV}, $unset: {targetVersion: true}}));
+checkFCV(adminDB, latestFCV);
- // Updating the featureCompatibilityVersion document with an invalid version fails.
- assert.writeErrorWithCode(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {version: "3.2"}}),
- ErrorCodes.BadValue);
- checkFCV(adminDB, latestFCV);
+// Updating the featureCompatibilityVersion document with an invalid version fails.
+assert.writeErrorWithCode(
+ adminDB.system.version.update({_id: "featureCompatibilityVersion"}, {$set: {version: "3.2"}}),
+ ErrorCodes.BadValue);
+checkFCV(adminDB, latestFCV);
- // Updating the featureCompatibilityVersion document with an invalid targetVersion fails.
- assert.writeErrorWithCode(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {targetVersion: lastStableFCV}}),
- ErrorCodes.BadValue);
- checkFCV(adminDB, latestFCV);
+// Updating the featureCompatibilityVersion document with an invalid targetVersion fails.
+assert.writeErrorWithCode(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {targetVersion: lastStableFCV}}),
+ ErrorCodes.BadValue);
+checkFCV(adminDB, latestFCV);
- assert.writeErrorWithCode(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
- {$set: {targetVersion: latestFCV}}),
- ErrorCodes.BadValue);
- checkFCV(adminDB, latestFCV);
+assert.writeErrorWithCode(adminDB.system.version.update({_id: "featureCompatibilityVersion"},
+ {$set: {targetVersion: latestFCV}}),
+ ErrorCodes.BadValue);
+checkFCV(adminDB, latestFCV);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
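Outside of tests that need to poke admin.system.version directly, the FCV is normally
changed with the setFeatureCompatibilityVersion command; a minimal sketch using the same
helpers this test loads:

    assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
    checkFCV(adminDB, lastStableFCV);
    assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
    checkFCV(adminDB, latestFCV);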
diff --git a/jstests/noPassthrough/filemd5_kill_during_yield.js b/jstests/noPassthrough/filemd5_kill_during_yield.js
index 250b6f23696..e2f74bcb1ce 100644
--- a/jstests/noPassthrough/filemd5_kill_during_yield.js
+++ b/jstests/noPassthrough/filemd5_kill_during_yield.js
@@ -2,47 +2,47 @@
// up the PlanExecutor without crashing the server. This test was designed to reproduce
// SERVER-35361.
(function() {
- "use strict";
-
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn);
- const db = conn.getDB("test");
- db.fs.chunks.drop();
- assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "64string")}));
- assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 1, data: new BinData(0, "test")}));
- db.fs.chunks.ensureIndex({files_id: 1, n: 1});
-
- const kFailPointName = "waitInFilemd5DuringManualYield";
- assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "alwaysOn"}));
-
- const failingMD5Shell =
- startParallelShell(() => assert.commandFailedWithCode(
- db.runCommand({filemd5: 1, root: "fs"}), ErrorCodes.Interrupted),
- conn.port);
-
- // Wait for filemd5 to manually yield and hang.
- let opId;
- assert.soon(
- () => {
- const filter = {ns: "test.fs.chunks", "command.filemd5": 1, msg: kFailPointName};
- const result =
- db.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: filter}]).toArray();
-
- if (result.length === 1) {
- opId = result[0].opid;
-
- return true;
- }
-
- return false;
- },
- () => "Failed to find operation in currentOp() output: " +
- tojson(db.currentOp({"ns": coll.getFullName()})));
-
- // Kill the operation, then disable the failpoint so the command recognizes it's been killed.
- assert.commandWorked(db.killOp(opId));
- assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
-
- failingMD5Shell();
- MongoRunner.stopMongod(conn);
+"use strict";
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn);
+const db = conn.getDB("test");
+db.fs.chunks.drop();
+assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "64string")}));
+assert.writeOK(db.fs.chunks.insert({files_id: 1, n: 1, data: new BinData(0, "test")}));
+db.fs.chunks.ensureIndex({files_id: 1, n: 1});
+
+const kFailPointName = "waitInFilemd5DuringManualYield";
+assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "alwaysOn"}));
+
+const failingMD5Shell =
+ startParallelShell(() => assert.commandFailedWithCode(db.runCommand({filemd5: 1, root: "fs"}),
+ ErrorCodes.Interrupted),
+ conn.port);
+
+// Wait for filemd5 to manually yield and hang.
+let opId;
+assert.soon(
+ () => {
+ const filter = {ns: "test.fs.chunks", "command.filemd5": 1, msg: kFailPointName};
+ const result =
+ db.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: filter}]).toArray();
+
+ if (result.length === 1) {
+ opId = result[0].opid;
+
+ return true;
+ }
+
+ return false;
+ },
+ () => "Failed to find operation in currentOp() output: " +
+        tojson(db.currentOp({"ns": db.fs.chunks.getFullName()})));
+
+// Kill the operation, then disable the failpoint so the command recognizes it's been killed.
+assert.commandWorked(db.killOp(opId));
+assert.commandWorked(db.adminCommand({configureFailPoint: kFailPointName, mode: "off"}));
+
+failingMD5Shell();
+MongoRunner.stopMongod(conn);
}());
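The wait-for-the-fail-point-then-kill pattern above generalizes; a hypothetical helper
capturing it (the name and signature are illustrative, not part of the test libraries):

    function killOpParkedOnFailPoint(db, failPointName, ns) {
        let opId;
        assert.soon(() => {
            const ops = db.getSiblingDB("admin")
                            .aggregate([{$currentOp: {}}, {$match: {ns: ns, msg: failPointName}}])
                            .toArray();
            if (ops.length === 1) {
                opId = ops[0].opid;
                return true;
            }
            return false;
        }, () => "no operation parked on fail point " + failPointName);
        assert.commandWorked(db.killOp(opId));
        assert.commandWorked(db.adminCommand({configureFailPoint: failPointName, mode: "off"}));
    }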
diff --git a/jstests/noPassthrough/find_by_uuid_and_rename.js b/jstests/noPassthrough/find_by_uuid_and_rename.js
index 206d9a0cbbd..6bdde57bd80 100644
--- a/jstests/noPassthrough/find_by_uuid_and_rename.js
+++ b/jstests/noPassthrough/find_by_uuid_and_rename.js
@@ -3,60 +3,59 @@
//
(function() {
- "use strict";
- const dbName = "do_concurrent_rename";
- const collName = "collA";
- const otherName = "collB";
- const repeatFind = 100;
- load("jstests/noPassthrough/libs/concurrent_rename.js");
- load("jstests/libs/parallel_shell_helpers.js");
-
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
- jsTestLog("Create collection.");
- let findRenameDB = conn.getDB(dbName);
- findRenameDB.dropDatabase();
- assert.commandWorked(findRenameDB.runCommand({"create": collName}));
- assert.commandWorked(
- findRenameDB.runCommand({insert: collName, documents: [{fooField: 'FOO'}]}));
-
- let infos = findRenameDB.getCollectionInfos();
- let uuid = infos[0].info.uuid;
- const findCmd = {"find": uuid};
-
- // Assert 'find' command by UUID works.
- assert.commandWorked(findRenameDB.runCommand(findCmd));
-
- jsTestLog("Start parallel shell for renames.");
- let renameShell =
- startParallelShell(funWithArgs(doRenames, dbName, collName, otherName), conn.port);
-
- // Wait until we receive confirmation that the parallel shell has started.
- assert.soon(() => conn.getDB("test").await_data.findOne({_id: "signal parent shell"}) !== null,
- "Expected parallel shell to insert a document.");
-
- jsTestLog("Start 'find' commands.");
- while (conn.getDB("test").await_data.findOne({_id: "rename has ended"}) == null) {
- for (let i = 0; i < repeatFind; i++) {
- let res = findRenameDB.runCommand(findCmd);
-
- // This is an acceptable transient error until SERVER-31695 has been completed.
- if (res.code === ErrorCodes.QueryPlanKilled) {
- print("Ignoring transient QueryPlanKilled error: " + res.errmsg);
- continue;
- }
- assert.commandWorked(res, "could not run " + tojson(findCmd));
- let cursor = new DBCommandCursor(findRenameDB, res);
- let errMsg = "expected more data from command " + tojson(findCmd) + ", with result " +
- tojson(res);
- assert(cursor.hasNext(), errMsg);
- let doc = cursor.next();
- assert.eq(doc.fooField, "FOO");
- assert(!cursor.hasNext(),
- "expected to have exhausted cursor for results " + tojson(res));
+"use strict";
+const dbName = "do_concurrent_rename";
+const collName = "collA";
+const otherName = "collB";
+const repeatFind = 100;
+load("jstests/noPassthrough/libs/concurrent_rename.js");
+load("jstests/libs/parallel_shell_helpers.js");
+
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+jsTestLog("Create collection.");
+let findRenameDB = conn.getDB(dbName);
+findRenameDB.dropDatabase();
+assert.commandWorked(findRenameDB.runCommand({"create": collName}));
+assert.commandWorked(findRenameDB.runCommand({insert: collName, documents: [{fooField: 'FOO'}]}));
+
+let infos = findRenameDB.getCollectionInfos();
+let uuid = infos[0].info.uuid;
+const findCmd = {
+ "find": uuid
+};
+
+// Assert 'find' command by UUID works.
+assert.commandWorked(findRenameDB.runCommand(findCmd));
+
+jsTestLog("Start parallel shell for renames.");
+let renameShell =
+ startParallelShell(funWithArgs(doRenames, dbName, collName, otherName), conn.port);
+
+// Wait until we receive confirmation that the parallel shell has started.
+assert.soon(() => conn.getDB("test").await_data.findOne({_id: "signal parent shell"}) !== null,
+ "Expected parallel shell to insert a document.");
+
+jsTestLog("Start 'find' commands.");
+while (conn.getDB("test").await_data.findOne({_id: "rename has ended"}) == null) {
+ for (let i = 0; i < repeatFind; i++) {
+ let res = findRenameDB.runCommand(findCmd);
+
+ // This is an acceptable transient error until SERVER-31695 has been completed.
+ if (res.code === ErrorCodes.QueryPlanKilled) {
+ print("Ignoring transient QueryPlanKilled error: " + res.errmsg);
+ continue;
}
+ assert.commandWorked(res, "could not run " + tojson(findCmd));
+ let cursor = new DBCommandCursor(findRenameDB, res);
+ let errMsg =
+ "expected more data from command " + tojson(findCmd) + ", with result " + tojson(res);
+ assert(cursor.hasNext(), errMsg);
+ let doc = cursor.next();
+ assert.eq(doc.fooField, "FOO");
+ assert(!cursor.hasNext(), "expected to have exhausted cursor for results " + tojson(res));
}
- renameShell();
- MongoRunner.stopMongod(conn);
-
+}
+renameShell();
+MongoRunner.stopMongod(conn);
}());
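A minimal find-by-UUID round trip, independent of the concurrent rename traffic, assuming a
collection named "collA" exists in the current database:

    const info = db.getCollectionInfos({name: "collA"})[0];
    const reply = assert.commandWorked(db.runCommand({find: info.info.uuid, batchSize: 1}));
    // The reply reports the resolved namespace, so the UUID was mapped back to "collA".
    assert.eq(reply.cursor.ns, db.getName() + ".collA");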
diff --git a/jstests/noPassthrough/flow_control_logging.js b/jstests/noPassthrough/flow_control_logging.js
index 0d4744b4559..bd3478aa7dd 100644
--- a/jstests/noPassthrough/flow_control_logging.js
+++ b/jstests/noPassthrough/flow_control_logging.js
@@ -8,51 +8,47 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- const replSet = new ReplSetTest({name: "flow_control_logging", nodes: 3});
- replSet.startSet({
- setParameter: {
- flowControlSamplePeriod:
- 1, // Increase resolution to detect lag in a light write workload.
- flowControlWarnThresholdSeconds: 1,
- // Configure flow control to engage after one second of lag.
- flowControlTargetLagSeconds: 1,
- flowControlThresholdLagPercentage: 1,
- // Use a speedy no-op writer to avoid needing a robust background writer.
- writePeriodicNoops: true,
- periodicNoopIntervalSecs:
- 2 // replSet.initiate() can hang with a one second interval for reasons.
- }
- });
- replSet.initiate();
-
- // Stop replication which will pin the commit point.
- for (let sec of replSet.getSecondaries()) {
- assert.commandWorked(sec.adminCommand({
- configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries",
- mode: "alwaysOn"
- }));
+const replSet = new ReplSetTest({name: "flow_control_logging", nodes: 3});
+replSet.startSet({
+ setParameter: {
+ flowControlSamplePeriod: 1, // Increase resolution to detect lag in a light write workload.
+ flowControlWarnThresholdSeconds: 1,
+ // Configure flow control to engage after one second of lag.
+ flowControlTargetLagSeconds: 1,
+ flowControlThresholdLagPercentage: 1,
+ // Use a speedy no-op writer to avoid needing a robust background writer.
+ writePeriodicNoops: true,
+ periodicNoopIntervalSecs:
+ 2 // replSet.initiate() can hang with a one second interval for reasons.
}
+});
+replSet.initiate();
- const timeoutMilliseconds = 30 * 1000;
- // The test has stopped replication and the primary's no-op writer is configured to create an
- // oplog entry every other second. Once the primary notices the sustainer rate is not moving, it
- // should start logging a warning once per second. This check waits for two log messages to make
- // sure the appropriate state variables are being reset.
- checkLog.containsWithAtLeastCount(
- replSet.getPrimary(),
- "Flow control is engaged and the sustainer point is not moving.",
- 2,
- timeoutMilliseconds);
+// Stop replication which will pin the commit point.
+for (let sec of replSet.getSecondaries()) {
+ assert.commandWorked(sec.adminCommand(
+ {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "alwaysOn"}));
+}
- // Restart replication so the replica set will shut down.
- for (let sec of replSet.getSecondaries()) {
- assert.commandWorked(sec.adminCommand(
- {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "off"}));
- }
+const timeoutMilliseconds = 30 * 1000;
+// The test has stopped replication and the primary's no-op writer is configured to create an
+// oplog entry every other second. Once the primary notices the sustainer rate is not moving, it
+// should start logging a warning once per second. This check waits for two log messages to make
+// sure the appropriate state variables are being reset.
+checkLog.containsWithAtLeastCount(replSet.getPrimary(),
+ "Flow control is engaged and the sustainer point is not moving.",
+ 2,
+ timeoutMilliseconds);
+
+// Restart replication so the replica set will shut down.
+for (let sec of replSet.getSecondaries()) {
+ assert.commandWorked(sec.adminCommand(
+ {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "off"}));
+}
- replSet.stopSet();
+replSet.stopSet();
})();
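Besides the log line that checkLog waits for, flow control state is also reported by
serverStatus; a hedged sketch (the flowControl section name is an assumption about
4.2-era output):

    const primaryStatus = assert.commandWorked(replSet.getPrimary().adminCommand({serverStatus: 1}));
    jsTestLog({flowControl: primaryStatus.flowControl});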
diff --git a/jstests/noPassthrough/flow_control_replica_set.js b/jstests/noPassthrough/flow_control_replica_set.js
index 43fa022b284..025c04e4e5b 100644
--- a/jstests/noPassthrough/flow_control_replica_set.js
+++ b/jstests/noPassthrough/flow_control_replica_set.js
@@ -12,55 +12,55 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
+const primary = replTest.getPrimary();
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: "flowControlTicketOverride",
- mode: "alwaysOn",
- data: {"numTickets": 1000 * 1000 * 1000}
- }));
- // Sleep 2 seconds for the failpoint to take effect.
- sleep(2000);
+assert.commandWorked(primary.adminCommand({
+ configureFailPoint: "flowControlTicketOverride",
+ mode: "alwaysOn",
+ data: {"numTickets": 1000 * 1000 * 1000}
+}));
+// Sleep 2 seconds for the failpoint to take effect.
+sleep(2000);
- let result = benchRun({
- host: primary.host,
- seconds: 5,
- parallel: 5,
- ops: [{op: "insert", ns: "foo.bar", doc: {field: "value"}}]
- });
- jsTestLog({CalibratingRun: result});
+let result = benchRun({
+ host: primary.host,
+ seconds: 5,
+ parallel: 5,
+ ops: [{op: "insert", ns: "foo.bar", doc: {field: "value"}}]
+});
+jsTestLog({CalibratingRun: result});
- let insertRate = result["insert"];
- let throttledRate = insertRate / 2;
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: "flowControlTicketOverride",
- mode: "alwaysOn",
- data: {"numTickets": NumberInt(throttledRate)}
- }));
- // Sleep 2 seconds for the failpoint to take effect.
- sleep(2000);
+let insertRate = result["insert"];
+let throttledRate = insertRate / 2;
+assert.commandWorked(primary.adminCommand({
+ configureFailPoint: "flowControlTicketOverride",
+ mode: "alwaysOn",
+ data: {"numTickets": NumberInt(throttledRate)}
+}));
+// Sleep 2 seconds for the failpoint to take effect.
+sleep(2000);
- result = benchRun({
- host: primary.host,
- seconds: 5,
- parallel: 5,
- ops: [{op: "insert", ns: "foo.bar", doc: {field: "value"}}]
- });
- jsTestLog({ThrottledRun: result, ThrottedRate: throttledRate});
- let maxAllowedRate = 1.5 * throttledRate;
- let minAllowedRate = 0.5 * throttledRate;
- assert.gt(result["insert"], minAllowedRate);
- assert.lt(result["insert"], maxAllowedRate);
+result = benchRun({
+ host: primary.host,
+ seconds: 5,
+ parallel: 5,
+ ops: [{op: "insert", ns: "foo.bar", doc: {field: "value"}}]
+});
+jsTestLog({ThrottledRun: result, ThrottledRate: throttledRate});
+let maxAllowedRate = 1.5 * throttledRate;
+let minAllowedRate = 0.5 * throttledRate;
+assert.gt(result["insert"], minAllowedRate);
+assert.lt(result["insert"], maxAllowedRate);
- // Cautiously unset to avoid any interaction with shutdown.
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "flowControlTicketOverride", mode: "off"}));
+// Cautiously unset to avoid any interaction with shutdown.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "flowControlTicketOverride", mode: "off"}));
- replTest.stopSet();
+replTest.stopSet();
})();
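The tolerance check at the end amounts to asserting that the measured insert rate landed
within 50% of the ticket budget in either direction; a hypothetical helper phrasing the
same assertion:

    function assertRateNear(measuredRate, targetRate, tolerance) {
        tolerance = tolerance || 0.5;
        assert.gt(measuredRate, (1 - tolerance) * targetRate, "rate throttled too aggressively");
        assert.lt(measuredRate, (1 + tolerance) * targetRate, "rate not throttled enough");
    }
    assertRateNear(result["insert"], throttledRate);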
diff --git a/jstests/noPassthrough/ftdc_connection_pool.js b/jstests/noPassthrough/ftdc_connection_pool.js
index f1fb7336aa0..cd284fed4a5 100644
--- a/jstests/noPassthrough/ftdc_connection_pool.js
+++ b/jstests/noPassthrough/ftdc_connection_pool.js
@@ -7,30 +7,30 @@
load('jstests/libs/ftdc.js');
(function() {
- 'use strict';
- const testPath = MongoRunner.toRealPath('ftdc_dir');
- const st = new ShardingTest({
- shards: 2,
- mongos: {
- s0: {setParameter: {diagnosticDataCollectionDirectoryPath: testPath}},
- }
- });
+'use strict';
+const testPath = MongoRunner.toRealPath('ftdc_dir');
+const st = new ShardingTest({
+ shards: 2,
+ mongos: {
+ s0: {setParameter: {diagnosticDataCollectionDirectoryPath: testPath}},
+ }
+});
- const admin = st.s0.getDB('admin');
- const stats = verifyGetDiagnosticData(admin).connPoolStats;
- jsTestLog(`Diagnostic connection pool stats: ${tojson(stats)}`);
+const admin = st.s0.getDB('admin');
+const stats = verifyGetDiagnosticData(admin).connPoolStats;
+jsTestLog(`Diagnostic connection pool stats: ${tojson(stats)}`);
- assert(stats.hasOwnProperty('totalInUse'));
- assert(stats.hasOwnProperty('totalAvailable'));
- assert(stats.hasOwnProperty('totalCreated'));
- assert(stats.hasOwnProperty('totalRefreshing'));
+assert(stats.hasOwnProperty('totalInUse'));
+assert(stats.hasOwnProperty('totalAvailable'));
+assert(stats.hasOwnProperty('totalCreated'));
+assert(stats.hasOwnProperty('totalRefreshing'));
- // The connPoolStats command reply has "hosts", but FTDC's stats do not.
- assert(!stats.hasOwnProperty('hosts'));
+// The connPoolStats command reply has "hosts", but FTDC's stats do not.
+assert(!stats.hasOwnProperty('hosts'));
- // Check a few properties, without attempting to be thorough.
- assert(stats.connectionsInUsePerPool.hasOwnProperty('NetworkInterfaceTL-ShardRegistry'));
- assert(stats.replicaSetPingTimesMillis.hasOwnProperty(st.configRS.name));
+// Check a few properties, without attempting to be thorough.
+assert(stats.connectionsInUsePerPool.hasOwnProperty('NetworkInterfaceTL-ShardRegistry'));
+assert(stats.replicaSetPingTimesMillis.hasOwnProperty(st.configRS.name));
- st.stop();
+st.stop();
})();
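verifyGetDiagnosticData comes from jstests/libs/ftdc.js; a hedged sketch of the admin
command it is assumed to wrap, showing which sections the most recent FTDC sample carries
on a mongos:

    const reply = assert.commandWorked(st.s0.getDB('admin').runCommand({getDiagnosticData: 1}));
    jsTestLog({ftdcSampleSections: Object.keys(reply.data)});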
diff --git a/jstests/noPassthrough/ftdc_setdirectory.js b/jstests/noPassthrough/ftdc_setdirectory.js
index 33877883fa3..caace9a9343 100644
--- a/jstests/noPassthrough/ftdc_setdirectory.js
+++ b/jstests/noPassthrough/ftdc_setdirectory.js
@@ -5,140 +5,136 @@
load('jstests/libs/ftdc.js');
(function() {
- 'use strict';
- let testPath1 = MongoRunner.toRealPath('ftdc_setdir1');
- let testPath2 = MongoRunner.toRealPath('ftdc_setdir2');
- let testPath3 = MongoRunner.toRealPath('ftdc_setdir3');
- // SERVER-30394: Use a directory relative to the current working directory.
- let testPath4 = 'ftdc_setdir4/';
- let testLog3 = testPath3 + "mongos_ftdc.log";
- let testLog4 = testPath4 + "mongos_ftdc.log";
-
- // Make the log file directory for mongos.
- mkdir(testPath3);
- mkdir(testPath4);
-
- // Startup 3 mongos:
- // 1. Normal MongoS with no log file to verify FTDC can be startup at runtime with a path.
- // 2. MongoS with explict diagnosticDataCollectionDirectoryPath setParameter at startup.
- // 3. MongoS with log file to verify automatic FTDC path computation works.
- let st = new ShardingTest({
- shards: 1,
- mongos: {
- s0: {verbose: 0},
- s1: {setParameter: {diagnosticDataCollectionDirectoryPath: testPath2}},
- s2: {logpath: testLog3},
- s3: {logpath: testLog4}
- }
- });
-
- let admin1 = st.s0.getDB('admin');
- let admin2 = st.s1.getDB('admin');
- let admin3 = st.s2.getDB('admin');
- let admin4 = st.s3.getDB('admin');
-
- function setParam(admin, obj) {
- var ret = admin.runCommand(Object.extend({setParameter: 1}, obj));
- return ret;
+'use strict';
+let testPath1 = MongoRunner.toRealPath('ftdc_setdir1');
+let testPath2 = MongoRunner.toRealPath('ftdc_setdir2');
+let testPath3 = MongoRunner.toRealPath('ftdc_setdir3');
+// SERVER-30394: Use a directory relative to the current working directory.
+let testPath4 = 'ftdc_setdir4/';
+let testLog3 = testPath3 + "mongos_ftdc.log";
+let testLog4 = testPath4 + "mongos_ftdc.log";
+
+// Make the log file directory for mongos.
+mkdir(testPath3);
+mkdir(testPath4);
+
+// Start up four mongos:
+// 1. Normal MongoS with no log file to verify FTDC can be started at runtime with a path.
+// 2. MongoS with an explicit diagnosticDataCollectionDirectoryPath setParameter at startup.
+// 3. MongoS with a log file (absolute or relative) to verify automatic FTDC path computation.
+let st = new ShardingTest({
+ shards: 1,
+ mongos: {
+ s0: {verbose: 0},
+ s1: {setParameter: {diagnosticDataCollectionDirectoryPath: testPath2}},
+ s2: {logpath: testLog3},
+ s3: {logpath: testLog4}
}
+});
- function getParam(admin, field) {
- var q = {getParameter: 1};
- q[field] = 1;
+let admin1 = st.s0.getDB('admin');
+let admin2 = st.s1.getDB('admin');
+let admin3 = st.s2.getDB('admin');
+let admin4 = st.s3.getDB('admin');
- var ret = admin.runCommand(q);
- assert.commandWorked(ret);
- return ret[field];
- }
+function setParam(admin, obj) {
+ var ret = admin.runCommand(Object.extend({setParameter: 1}, obj));
+ return ret;
+}
- // Verify FTDC can be started at runtime.
- function verifyFTDCDisabledOnStartup() {
- jsTestLog("Running verifyFTDCDisabledOnStartup");
- verifyCommonFTDCParameters(admin1, false);
+function getParam(admin, field) {
+ var q = {getParameter: 1};
+ q[field] = 1;
- // 1. Try to enable and fail
- assert.commandFailed(setParam(admin1, {"diagnosticDataCollectionEnabled": 1}));
+ var ret = admin.runCommand(q);
+ assert.commandWorked(ret);
+ return ret[field];
+}
- // 2. Set path and succeed
- assert.commandWorked(
- setParam(admin1, {"diagnosticDataCollectionDirectoryPath": testPath1}));
+// Verify FTDC can be started at runtime.
+function verifyFTDCDisabledOnStartup() {
+ jsTestLog("Running verifyFTDCDisabledOnStartup");
+ verifyCommonFTDCParameters(admin1, false);
- // 3. Set path again and fail
- assert.commandFailed(
- setParam(admin1, {"diagnosticDataCollectionDirectoryPath": testPath1}));
+ // 1. Try to enable and fail
+ assert.commandFailed(setParam(admin1, {"diagnosticDataCollectionEnabled": 1}));
- // 4. Enable successfully
- assert.commandWorked(setParam(admin1, {"diagnosticDataCollectionEnabled": 1}));
+ // 2. Set path and succeed
+ assert.commandWorked(setParam(admin1, {"diagnosticDataCollectionDirectoryPath": testPath1}));
- // 5. Validate getDiagnosticData returns FTDC data now
- jsTestLog("Verifying FTDC getDiagnosticData");
- verifyGetDiagnosticData(admin1);
- }
+ // 3. Set path again and fail
+ assert.commandFailed(setParam(admin1, {"diagnosticDataCollectionDirectoryPath": testPath1}));
- // Verify FTDC is already running if there was a path set at startup.
- function verifyFTDCStartsWithPath() {
- jsTestLog("Running verifyFTDCStartsWithPath");
- verifyCommonFTDCParameters(admin2, true);
+ // 4. Enable successfully
+ assert.commandWorked(setParam(admin1, {"diagnosticDataCollectionEnabled": 1}));
- // 1. Set path fail
- assert.commandFailed(
- setParam(admin2, {"diagnosticDataCollectionDirectoryPath": testPath2}));
+ // 5. Validate getDiagnosticData returns FTDC data now
+ jsTestLog("Verifying FTDC getDiagnosticData");
+ verifyGetDiagnosticData(admin1);
+}
- // 2. Enable successfully
- assert.commandWorked(setParam(admin2, {"diagnosticDataCollectionEnabled": 1}));
+// Verify FTDC is already running if there was a path set at startup.
+function verifyFTDCStartsWithPath() {
+ jsTestLog("Running verifyFTDCStartsWithPath");
+ verifyCommonFTDCParameters(admin2, true);
- // 3. Validate getDiagnosticData returns FTDC data now
- jsTestLog("Verifying FTDC getDiagnosticData");
- verifyGetDiagnosticData(admin2);
- }
+ // 1. Set path fail
+ assert.commandFailed(setParam(admin2, {"diagnosticDataCollectionDirectoryPath": testPath2}));
- function normpath(path) {
- // On Windows, strip the drive path because MongoRunner.toRealPath() returns a Unix Path
- // while FTDC returns a Windows path.
- return path.replace(/\\/g, "/").replace(/\w:/, "");
- }
+ // 2. Enable successfully
+ assert.commandWorked(setParam(admin2, {"diagnosticDataCollectionEnabled": 1}));
- // Verify FTDC is already running if there was a path set at startup.
- function verifyFTDCStartsWithLogFile() {
- jsTestLog("Running verifyFTDCStartsWithLogFile");
- verifyCommonFTDCParameters(admin3, true);
+ // 3. Validate getDiagnosticData returns FTDC data now
+ jsTestLog("Verifying FTDC getDiagnosticData");
+ verifyGetDiagnosticData(admin2);
+}
- // 1. Verify that path is computed correctly.
- let computedPath = getParam(admin3, "diagnosticDataCollectionDirectoryPath");
- assert.eq(normpath(computedPath), normpath(testPath3 + "mongos_ftdc.diagnostic.data"));
+function normpath(path) {
+ // On Windows, strip the drive path because MongoRunner.toRealPath() returns a Unix Path
+ // while FTDC returns a Windows path.
+ return path.replace(/\\/g, "/").replace(/\w:/, "");
+}
- // 2. Set path fail
- assert.commandFailed(
- setParam(admin3, {"diagnosticDataCollectionDirectoryPath": testPath3}));
+// Verify FTDC is already running if there was a path set at startup.
+function verifyFTDCStartsWithLogFile() {
+ jsTestLog("Running verifyFTDCStartsWithLogFile");
+ verifyCommonFTDCParameters(admin3, true);
- // 3. Enable successfully
- assert.commandWorked(setParam(admin3, {"diagnosticDataCollectionEnabled": 1}));
+ // 1. Verify that path is computed correctly.
+ let computedPath = getParam(admin3, "diagnosticDataCollectionDirectoryPath");
+ assert.eq(normpath(computedPath), normpath(testPath3 + "mongos_ftdc.diagnostic.data"));
- // 4. Validate getDiagnosticData returns FTDC data now
- jsTestLog("Verifying FTDC getDiagnosticData");
- verifyGetDiagnosticData(admin3);
- }
+ // 2. Set path fail
+ assert.commandFailed(setParam(admin3, {"diagnosticDataCollectionDirectoryPath": testPath3}));
- // Verify FTDC is already running if there is a relative log file path.
- function verifyFTDCStartsWithRelativeLogFile() {
- jsTestLog("Running verifyFTDCStartsWithRelativeLogFile");
- verifyCommonFTDCParameters(admin4, true);
+ // 3. Enable successfully
+ assert.commandWorked(setParam(admin3, {"diagnosticDataCollectionEnabled": 1}));
- // Skip verification of diagnosticDataCollectionDirectoryPath because it relies on comparing
- // cwd vs dbPath.
+ // 4. Validate getDiagnosticData returns FTDC data now
+ jsTestLog("Verifying FTDC getDiagnosticData");
+ verifyGetDiagnosticData(admin3);
+}
- // 1. Enable successfully
- assert.commandWorked(setParam(admin4, {"diagnosticDataCollectionEnabled": 1}));
+// Verify FTDC is already running if there is a relative log file path.
+function verifyFTDCStartsWithRelativeLogFile() {
+ jsTestLog("Running verifyFTDCStartsWithRelativeLogFile");
+ verifyCommonFTDCParameters(admin4, true);
- // 2. Validate getDiagnosticData returns FTDC data now
- jsTestLog("Verifying FTDC getDiagnosticData");
- verifyGetDiagnosticData(admin4);
- }
+ // Skip verification of diagnosticDataCollectionDirectoryPath because it relies on comparing
+ // cwd vs dbPath.
+
+ // 1. Enable successfully
+ assert.commandWorked(setParam(admin4, {"diagnosticDataCollectionEnabled": 1}));
+
+ // 2. Validate getDiagnosticData returns FTDC data now
+ jsTestLog("Verifying FTDC getDiagnosticData");
+ verifyGetDiagnosticData(admin4);
+}
- verifyFTDCDisabledOnStartup();
- verifyFTDCStartsWithPath();
- verifyFTDCStartsWithLogFile();
- verifyFTDCStartsWithRelativeLogFile();
+verifyFTDCDisabledOnStartup();
+verifyFTDCStartsWithPath();
+verifyFTDCStartsWithLogFile();
+verifyFTDCStartsWithRelativeLogFile();
- st.stop();
+st.stop();
})();
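Condensed, the runtime-enable sequence exercised by verifyFTDCDisabledOnStartup is:
enabling fails until a directory path has been supplied, after which enabling succeeds. A
sketch against a hypothetical mongos admin handle started without any FTDC options:

    // 'admin' is assumed to be the admin DB of a mongos started without FTDC configured.
    const ftdcDir = MongoRunner.toRealPath('ftdc_sketch');  // illustrative directory
    assert.commandFailed(admin.runCommand({setParameter: 1, diagnosticDataCollectionEnabled: 1}));
    assert.commandWorked(
        admin.runCommand({setParameter: 1, diagnosticDataCollectionDirectoryPath: ftdcDir}));
    assert.commandWorked(admin.runCommand({setParameter: 1, diagnosticDataCollectionEnabled: 1}));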
diff --git a/jstests/noPassthrough/ftdc_setparam.js b/jstests/noPassthrough/ftdc_setparam.js
index 4e9b2459311..8a810807541 100644
--- a/jstests/noPassthrough/ftdc_setparam.js
+++ b/jstests/noPassthrough/ftdc_setparam.js
@@ -1,19 +1,19 @@
// validate command line ftdc parameter parsing
(function() {
- 'use strict';
- var m = MongoRunner.runMongod({setParameter: "diagnosticDataCollectionPeriodMillis=101"});
+'use strict';
+var m = MongoRunner.runMongod({setParameter: "diagnosticDataCollectionPeriodMillis=101"});
- // Check the defaults are correct
- //
- function getparam(field) {
- var q = {getParameter: 1};
- q[field] = 1;
+// Check the defaults are correct
+//
+function getparam(field) {
+ var q = {getParameter: 1};
+ q[field] = 1;
- var ret = m.getDB("admin").runCommand(q);
- return ret[field];
- }
+ var ret = m.getDB("admin").runCommand(q);
+ return ret[field];
+}
- assert.eq(getparam("diagnosticDataCollectionPeriodMillis"), 101);
- MongoRunner.stopMongod(m);
+assert.eq(getparam("diagnosticDataCollectionPeriodMillis"), 101);
+MongoRunner.stopMongod(m);
})();
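The parameter read back here can also be changed after startup; a sketch, placed
conceptually before the stopMongod call above and assuming
diagnosticDataCollectionPeriodMillis is runtime-settable in this server version:

    assert.commandWorked(
        m.getDB("admin").runCommand({setParameter: 1, diagnosticDataCollectionPeriodMillis: 500}));
    assert.eq(getparam("diagnosticDataCollectionPeriodMillis"), 500);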
diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js
index 7bebdce9dba..7ffd8e90c50 100644
--- a/jstests/noPassthrough/geo_full.js
+++ b/jstests/noPassthrough/geo_full.js
@@ -19,549 +19,533 @@
//
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/geo_math.js");
+load("jstests/libs/geo_math.js");
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- const db = conn.getDB("test");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+const db = conn.getDB("test");
- var randEnvironment = function() {
-
- // Normal earth environment
- if (Random.rand() < 0.5) {
- return {
- max: 180,
- min: -180,
- bits: Math.floor(Random.rand() * 32) + 1,
- earth: true,
- bucketSize: 360 / (4 * 1024 * 1024 * 1024)
- };
- }
-
- var scales = [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000];
- var scale = scales[Math.floor(Random.rand() * scales.length)];
- var offset = Random.rand() * scale;
-
- var max = Random.rand() * scale + offset;
- var min = -Random.rand() * scale + offset;
- var bits = Math.floor(Random.rand() * 32) + 1;
- var bits = Math.floor(Random.rand() * 32) + 1;
- var range = max - min;
- var bucketSize = range / (4 * 1024 * 1024 * 1024);
-
- return {max: max, min: min, bits: bits, earth: false, bucketSize: bucketSize};
- };
+var randEnvironment = function() {
+ // Normal earth environment
+ if (Random.rand() < 0.5) {
+ return {
+ max: 180,
+ min: -180,
+ bits: Math.floor(Random.rand() * 32) + 1,
+ earth: true,
+ bucketSize: 360 / (4 * 1024 * 1024 * 1024)
+ };
+ }
- var randPoint = function(env, query) {
+ var scales = [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000];
+ var scale = scales[Math.floor(Random.rand() * scales.length)];
+ var offset = Random.rand() * scale;
- if (query && Random.rand() > 0.5)
- return query.exact;
+ var max = Random.rand() * scale + offset;
+ var min = -Random.rand() * scale + offset;
+ var bits = Math.floor(Random.rand() * 32) + 1;
+ var bits = Math.floor(Random.rand() * 32) + 1;
+ var range = max - min;
+ var bucketSize = range / (4 * 1024 * 1024 * 1024);
- if (env.earth)
- return [Random.rand() * 360 - 180, Random.rand() * 180 - 90];
+ return {max: max, min: min, bits: bits, earth: false, bucketSize: bucketSize};
+};
- var range = env.max - env.min;
- return [Random.rand() * range + env.min, Random.rand() * range + env.min];
- };
+var randPoint = function(env, query) {
+ if (query && Random.rand() > 0.5)
+ return query.exact;
- var randLocType = function(loc, wrapIn) {
- return randLocTypes([loc], wrapIn)[0];
- };
+ if (env.earth)
+ return [Random.rand() * 360 - 180, Random.rand() * 180 - 90];
- var randLocTypes = function(locs, wrapIn) {
+ var range = env.max - env.min;
+ return [Random.rand() * range + env.min, Random.rand() * range + env.min];
+};
- var rLocs = [];
+var randLocType = function(loc, wrapIn) {
+ return randLocTypes([loc], wrapIn)[0];
+};
- for (var i = 0; i < locs.length; i++) {
- rLocs.push(locs[i]);
- }
+var randLocTypes = function(locs, wrapIn) {
+ var rLocs = [];
- if (wrapIn) {
- var wrappedLocs = [];
- for (var i = 0; i < rLocs.length; i++) {
- var wrapper = {};
- wrapper[wrapIn] = rLocs[i];
- wrappedLocs.push(wrapper);
- }
+ for (var i = 0; i < locs.length; i++) {
+ rLocs.push(locs[i]);
+ }
- return wrappedLocs;
+ if (wrapIn) {
+ var wrappedLocs = [];
+ for (var i = 0; i < rLocs.length; i++) {
+ var wrapper = {};
+ wrapper[wrapIn] = rLocs[i];
+ wrappedLocs.push(wrapper);
}
- return rLocs;
- };
-
- var randDataType = function() {
-
- var scales = [1, 10, 100, 1000, 10000];
- var docScale = scales[Math.floor(Random.rand() * scales.length)];
- var locScale = scales[Math.floor(Random.rand() * scales.length)];
+ return wrappedLocs;
+ }
- var numDocs = 40000;
- var maxLocs = 40000;
- // Make sure we don't blow past our test resources
- while (numDocs * maxLocs > 40000) {
- numDocs = Math.floor(Random.rand() * docScale) + 1;
- maxLocs = Math.floor(Random.rand() * locScale) + 1;
- }
+ return rLocs;
+};
- return {numDocs: numDocs, maxLocs: maxLocs};
- };
+var randDataType = function() {
+ var scales = [1, 10, 100, 1000, 10000];
+ var docScale = scales[Math.floor(Random.rand() * scales.length)];
+ var locScale = scales[Math.floor(Random.rand() * scales.length)];
- function computexscandist(latDegrees, maxDistDegrees) {
- // See s2cap.cc
- //
- // Compute the range of longitudes covered by the cap. We use the law
- // of sines for spherical triangles. Consider the triangle ABC where
- // A is the north pole, B is the center of the cap, and C is the point
- // of tangency between the cap boundary and a line of longitude. Then
- // C is a right angle, and letting a,b,c denote the sides opposite A,B,C,
- // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c).
- // Here "a" is the cap angle, and "c" is the colatitude (90 degrees
- // minus the latitude). This formula also works for negative latitudes.
- //
- // Angle A is the difference of longitudes of B and C.
- var sin_c = Math.cos(deg2rad(latDegrees));
- var sin_a = Math.sin(deg2rad(maxDistDegrees));
- if (sin_a > sin_c) {
- // Double floating number error, return invalid distance
- return 180;
- }
- var angleA = Math.asin(sin_a / sin_c);
- return rad2deg(angleA);
+ var numDocs = 40000;
+ var maxLocs = 40000;
+ // Make sure we don't blow past our test resources
+ while (numDocs * maxLocs > 40000) {
+ numDocs = Math.floor(Random.rand() * docScale) + 1;
+ maxLocs = Math.floor(Random.rand() * locScale) + 1;
}
- function errorMarginForPoint(env) {
- if (!env.bits) {
- return 0.01;
- }
- var scalingFactor = Math.pow(2, env.bits);
- return ((env.max - env.min) / scalingFactor) * Math.sqrt(2);
+ return {numDocs: numDocs, maxLocs: maxLocs};
+};
+
+function computexscandist(latDegrees, maxDistDegrees) {
+ // See s2cap.cc
+ //
+ // Compute the range of longitudes covered by the cap. We use the law
+ // of sines for spherical triangles. Consider the triangle ABC where
+ // A is the north pole, B is the center of the cap, and C is the point
+ // of tangency between the cap boundary and a line of longitude. Then
+ // C is a right angle, and letting a,b,c denote the sides opposite A,B,C,
+ // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c).
+ // Here "a" is the cap angle, and "c" is the colatitude (90 degrees
+ // minus the latitude). This formula also works for negative latitudes.
+ //
+ // Angle A is the difference of longitudes of B and C.
+ var sin_c = Math.cos(deg2rad(latDegrees));
+ var sin_a = Math.sin(deg2rad(maxDistDegrees));
+ if (sin_a > sin_c) {
+        // Double-precision floating point error; return an invalid distance
+ return 180;
}
+ var angleA = Math.asin(sin_a / sin_c);
+ return rad2deg(angleA);
+}
- function pointIsOK(startPoint, radius, env) {
- var error = errorMarginForPoint(env);
- var distDegrees = rad2deg(radius) + error;
- // TODO SERVER-24440: Points close to the north and south poles may fail to be returned by
- // $nearSphere queries answered using a "2d" index. We have empirically found that points
- // with latitudes between 89 and 90 degrees are potentially affected by this issue, so we
- // additionally reject any coordinates with a latitude that falls within that range.
- if ((startPoint[1] + distDegrees > 89) || (startPoint[1] - distDegrees < -89)) {
- return false;
- }
- var xscandist = computexscandist(startPoint[1], distDegrees);
- return (startPoint[0] + xscandist < 180) && (startPoint[0] - xscandist > -180);
+function errorMarginForPoint(env) {
+ if (!env.bits) {
+ return 0.01;
}
-
- var randQuery = function(env) {
- var center = randPoint(env);
-
- var sphereRadius = -1;
- var sphereCenter = null;
- if (env.earth) {
- // Get a start point that doesn't require wrapping
- // TODO: Are we a bit too aggressive with wrapping issues?
- var i;
- for (i = 0; i < 5; i++) {
- sphereRadius = Random.rand() * 45 * Math.PI / 180;
- sphereCenter = randPoint(env);
- if (pointIsOK(sphereCenter, sphereRadius, env)) {
- break;
- }
+ var scalingFactor = Math.pow(2, env.bits);
+ return ((env.max - env.min) / scalingFactor) * Math.sqrt(2);
+}
+
+function pointIsOK(startPoint, radius, env) {
+ var error = errorMarginForPoint(env);
+ var distDegrees = rad2deg(radius) + error;
+ // TODO SERVER-24440: Points close to the north and south poles may fail to be returned by
+ // $nearSphere queries answered using a "2d" index. We have empirically found that points
+ // with latitudes between 89 and 90 degrees are potentially affected by this issue, so we
+ // additionally reject any coordinates with a latitude that falls within that range.
+ if ((startPoint[1] + distDegrees > 89) || (startPoint[1] - distDegrees < -89)) {
+ return false;
+ }
+ var xscandist = computexscandist(startPoint[1], distDegrees);
+ return (startPoint[0] + xscandist < 180) && (startPoint[0] - xscandist > -180);
+}
+
+var randQuery = function(env) {
+ var center = randPoint(env);
+
+ var sphereRadius = -1;
+ var sphereCenter = null;
+ if (env.earth) {
+ // Get a start point that doesn't require wrapping
+ // TODO: Are we a bit too aggressive with wrapping issues?
+ var i;
+ for (i = 0; i < 5; i++) {
+ sphereRadius = Random.rand() * 45 * Math.PI / 180;
+ sphereCenter = randPoint(env);
+ if (pointIsOK(sphereCenter, sphereRadius, env)) {
+ break;
}
- if (i == 5)
- sphereRadius = -1;
}
+ if (i == 5)
+ sphereRadius = -1;
+ }
- var box = [randPoint(env), randPoint(env)];
-
- var boxPoly = [
- [box[0][0], box[0][1]],
- [box[0][0], box[1][1]],
- [box[1][0], box[1][1]],
- [box[1][0], box[0][1]]
- ];
-
- if (box[0][0] > box[1][0]) {
- var swap = box[0][0];
- box[0][0] = box[1][0];
- box[1][0] = swap;
- }
-
- if (box[0][1] > box[1][1]) {
- var swap = box[0][1];
- box[0][1] = box[1][1];
- box[1][1] = swap;
- }
-
- return {
- center: center,
- radius: box[1][0] - box[0][0],
- exact: randPoint(env),
- sphereCenter: sphereCenter,
- sphereRadius: sphereRadius,
- box: box,
- boxPoly: boxPoly
- };
- };
-
- var resultTypes = {
- "exact": function(loc) {
- return query.exact[0] == loc[0] && query.exact[1] == loc[1];
- },
- "center": function(loc) {
- return Geo.distance(query.center, loc) <= query.radius;
- },
- "box": function(loc) {
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
-
- },
- "sphere": function(loc) {
- return (query.sphereRadius >= 0
- ? (Geo.sphereDistance(query.sphereCenter, loc) <= query.sphereRadius)
- : false);
- },
- "poly": function(loc) {
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
- }
- };
-
- var queryResults = function(locs, query, results) {
+ var box = [randPoint(env), randPoint(env)];
- if (!results["center"]) {
- for (var type in resultTypes) {
- results[type] = {docsIn: 0, docsOut: 0, locsIn: 0, locsOut: 0};
- }
- }
+ var boxPoly = [
+ [box[0][0], box[0][1]],
+ [box[0][0], box[1][1]],
+ [box[1][0], box[1][1]],
+ [box[1][0], box[0][1]]
+ ];
- var indResults = {};
- for (var type in resultTypes) {
- indResults[type] = {docIn: false, locsIn: 0, locsOut: 0};
- }
-
- for (var type in resultTypes) {
- var docIn = false;
- for (var i = 0; i < locs.length; i++) {
- if (resultTypes[type](locs[i])) {
- results[type].locsIn++;
- indResults[type].locsIn++;
- indResults[type].docIn = true;
- } else {
- results[type].locsOut++;
- indResults[type].locsOut++;
- }
- }
- if (indResults[type].docIn)
- results[type].docsIn++;
- else
- results[type].docsOut++;
- }
-
- return indResults;
- };
+ if (box[0][0] > box[1][0]) {
+ var swap = box[0][0];
+ box[0][0] = box[1][0];
+ box[1][0] = swap;
+ }
- var randQueryAdditions = function(doc, indResults) {
+ if (box[0][1] > box[1][1]) {
+ var swap = box[0][1];
+ box[0][1] = box[1][1];
+ box[1][1] = swap;
+ }
- for (var type in resultTypes) {
- var choice = Random.rand();
- if (Random.rand() < 0.25)
- doc[type] = (indResults[type].docIn ? {docIn: "yes"} : {docIn: "no"});
- else if (Random.rand() < 0.5)
- doc[type] = (indResults[type].docIn ? {docIn: ["yes"]} : {docIn: ["no"]});
- else if (Random.rand() < 0.75)
- doc[type] = (indResults[type].docIn ? [{docIn: "yes"}] : [{docIn: "no"}]);
- else
- doc[type] = (indResults[type].docIn ? [{docIn: ["yes"]}] : [{docIn: ["no"]}]);
- }
+ return {
+ center: center,
+ radius: box[1][0] - box[0][0],
+ exact: randPoint(env),
+ sphereCenter: sphereCenter,
+ sphereRadius: sphereRadius,
+ box: box,
+ boxPoly: boxPoly
};
+};
+
+var resultTypes = {
+ "exact": function(loc) {
+ return query.exact[0] == loc[0] && query.exact[1] == loc[1];
+ },
+ "center": function(loc) {
+ return Geo.distance(query.center, loc) <= query.radius;
+ },
+ "box": function(loc) {
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
+ },
+ "sphere": function(loc) {
+ return (query.sphereRadius >= 0
+ ? (Geo.sphereDistance(query.sphereCenter, loc) <= query.sphereRadius)
+ : false);
+ },
+ "poly": function(loc) {
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
+ }
+};
- var randIndexAdditions = function(indexDoc) {
-
+var queryResults = function(locs, query, results) {
+ if (!results["center"]) {
for (var type in resultTypes) {
- if (Random.rand() < 0.5)
- continue;
-
- var choice = Random.rand();
- if (Random.rand() < 0.5)
- indexDoc[type] = 1;
- else
- indexDoc[type + ".docIn"] = 1;
- }
- };
-
- var randYesQuery = function() {
-
- var choice = Math.floor(Random.rand() * 7);
- if (choice == 0)
- return {$ne: "no"};
- else if (choice == 1)
- return "yes";
- else if (choice == 2)
- return /^yes/;
- else if (choice == 3)
- return {$in: ["good", "yes", "ok"]};
- else if (choice == 4)
- return {$exists: true};
- else if (choice == 5)
- return {$nin: ["bad", "no", "not ok"]};
- else if (choice == 6)
- return {$not: /^no/};
- };
-
- var locArray = function(loc) {
- if (loc.x)
- return [loc.x, loc.y];
- if (!loc.length)
- return [loc[0], loc[1]];
- return loc;
- };
-
- var locsArray = function(locs) {
- if (locs.loc) {
- const arr = [];
- for (var i = 0; i < locs.loc.length; i++)
- arr.push(locArray(locs.loc[i]));
- return arr;
- } else {
- const arr = [];
- for (var i = 0; i < locs.length; i++)
- arr.push(locArray(locs[i].loc));
- return arr;
+ results[type] = {docsIn: 0, docsOut: 0, locsIn: 0, locsOut: 0};
}
- };
-
- var minBoxSize = function(env, box) {
- return env.bucketSize * Math.pow(2, minBucketScale(env, box));
- };
-
- var minBucketScale = function(env, box) {
-
- if (box.length && box[0].length)
- box = [box[0][0] - box[1][0], box[0][1] - box[1][1]];
-
- if (box.length)
- box = Math.max(box[0], box[1]);
-
- print(box);
- print(env.bucketSize);
-
- return Math.ceil(Math.log(box / env.bucketSize) / Math.log(2));
+ }
- };
+ var indResults = {};
+ for (var type in resultTypes) {
+ indResults[type] = {docIn: false, locsIn: 0, locsOut: 0};
+ }
- // TODO: Add spherical $uniqueDocs tests
- var numTests = 100;
-
- // Our seed will change every time this is run, but
- // each individual test will be reproducible given
- // that seed and test number
- var seed = new Date().getTime();
- // seed = 175 + 288 + 12
-
- for (var test = 0; test < numTests; test++) {
- Random.srand(seed + test);
- // Random.srand( 42240 )
- // Random.srand( 7344 )
- var t = db.testAllGeo;
- t.drop();
-
- print("Generating test environment #" + test);
- var env = randEnvironment();
- // env.bits = 11
- var query = randQuery(env);
- var data = randDataType();
- // data.numDocs = 5; data.maxLocs = 1;
- var paddingSize = Math.floor(Random.rand() * 10 + 1);
- var results = {};
- var totalPoints = 0;
- print("Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs +
- " locs ");
-
- var bulk = t.initializeUnorderedBulkOp();
- for (var i = 0; i < data.numDocs; i++) {
- var numLocs = Math.floor(Random.rand() * data.maxLocs + 1);
- totalPoints += numLocs;
-
- var multiPoint = [];
- for (var p = 0; p < numLocs; p++) {
- var point = randPoint(env, query);
- multiPoint.push(point);
+ for (var type in resultTypes) {
+ var docIn = false;
+ for (var i = 0; i < locs.length; i++) {
+ if (resultTypes[type](locs[i])) {
+ results[type].locsIn++;
+ indResults[type].locsIn++;
+ indResults[type].docIn = true;
+ } else {
+ results[type].locsOut++;
+ indResults[type].locsOut++;
}
-
- var indResults = queryResults(multiPoint, query, results);
-
- var doc;
- // Nest the keys differently
- if (Random.rand() < 0.5)
- doc = {locs: {loc: randLocTypes(multiPoint)}};
- else
- doc = {locs: randLocTypes(multiPoint, "loc")};
-
- randQueryAdditions(doc, indResults);
-
- doc._id = i;
- bulk.insert(doc);
}
- assert.writeOK(bulk.execute());
-
- var indexDoc = {"locs.loc": "2d"};
- randIndexAdditions(indexDoc);
-
- // "earth" is used to drive test setup and not a valid createIndexes option or required at
- // this point. It must be removed before calling ensureIndexes().
- delete env.earth;
-
- assert.commandWorked(t.ensureIndex(indexDoc, env));
- assert.isnull(db.getLastError());
+ if (indResults[type].docIn)
+ results[type].docsIn++;
+ else
+ results[type].docsOut++;
+ }
- var padding = "x";
- for (var i = 0; i < paddingSize; i++)
- padding = padding + padding;
+ return indResults;
+};
+
+var randQueryAdditions = function(doc, indResults) {
+ for (var type in resultTypes) {
+ var choice = Random.rand();
+ if (Random.rand() < 0.25)
+ doc[type] = (indResults[type].docIn ? {docIn: "yes"} : {docIn: "no"});
+ else if (Random.rand() < 0.5)
+ doc[type] = (indResults[type].docIn ? {docIn: ["yes"]} : {docIn: ["no"]});
+ else if (Random.rand() < 0.75)
+ doc[type] = (indResults[type].docIn ? [{docIn: "yes"}] : [{docIn: "no"}]);
+ else
+ doc[type] = (indResults[type].docIn ? [{docIn: ["yes"]}] : [{docIn: ["no"]}]);
+ }
+};
+
+var randIndexAdditions = function(indexDoc) {
+ for (var type in resultTypes) {
+ if (Random.rand() < 0.5)
+ continue;
+
+ var choice = Random.rand();
+ if (Random.rand() < 0.5)
+ indexDoc[type] = 1;
+ else
+ indexDoc[type + ".docIn"] = 1;
+ }
+};
+
+var randYesQuery = function() {
+ var choice = Math.floor(Random.rand() * 7);
+ if (choice == 0)
+ return {$ne: "no"};
+ else if (choice == 1)
+ return "yes";
+ else if (choice == 2)
+ return /^yes/;
+ else if (choice == 3)
+ return {$in: ["good", "yes", "ok"]};
+ else if (choice == 4)
+ return {$exists: true};
+ else if (choice == 5)
+ return {$nin: ["bad", "no", "not ok"]};
+ else if (choice == 6)
+ return {$not: /^no/};
+};
+
+var locArray = function(loc) {
+ if (loc.x)
+ return [loc.x, loc.y];
+ if (!loc.length)
+ return [loc[0], loc[1]];
+ return loc;
+};
+
+var locsArray = function(locs) {
+ if (locs.loc) {
+ const arr = [];
+ for (var i = 0; i < locs.loc.length; i++)
+ arr.push(locArray(locs.loc[i]));
+ return arr;
+ } else {
+ const arr = [];
+ for (var i = 0; i < locs.length; i++)
+ arr.push(locArray(locs[i].loc));
+ return arr;
+ }
+};
+
+var minBoxSize = function(env, box) {
+ return env.bucketSize * Math.pow(2, minBucketScale(env, box));
+};
+
+var minBucketScale = function(env, box) {
+ if (box.length && box[0].length)
+ box = [box[0][0] - box[1][0], box[0][1] - box[1][1]];
+
+ if (box.length)
+ box = Math.max(box[0], box[1]);
+
+ print(box);
+ print(env.bucketSize);
+
+ return Math.ceil(Math.log(box / env.bucketSize) / Math.log(2));
+};
+
+// TODO: Add spherical $uniqueDocs tests
+var numTests = 100;
+
+// Our seed will change every time this is run, but
+// each individual test will be reproducible given
+// that seed and test number
+var seed = new Date().getTime();
+// seed = 175 + 288 + 12
+
+for (var test = 0; test < numTests; test++) {
+ Random.srand(seed + test);
+ // Random.srand( 42240 )
+ // Random.srand( 7344 )
+ var t = db.testAllGeo;
+ t.drop();
+
+ print("Generating test environment #" + test);
+ var env = randEnvironment();
+ // env.bits = 11
+ var query = randQuery(env);
+ var data = randDataType();
+ // data.numDocs = 5; data.maxLocs = 1;
+ var paddingSize = Math.floor(Random.rand() * 10 + 1);
+ var results = {};
+ var totalPoints = 0;
+ print("Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs +
+ " locs ");
+
+ var bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < data.numDocs; i++) {
+ var numLocs = Math.floor(Random.rand() * data.maxLocs + 1);
+ totalPoints += numLocs;
+
+ var multiPoint = [];
+ for (var p = 0; p < numLocs; p++) {
+ var point = randPoint(env, query);
+ multiPoint.push(point);
+ }
- print(padding);
+ var indResults = queryResults(multiPoint, query, results);
- printjson({
- seed: seed,
- test: test,
- env: env,
- query: query,
- data: data,
- results: results,
- paddingSize: paddingSize
- });
+ var doc;
+ // Nest the keys differently
+ if (Random.rand() < 0.5)
+ doc = {locs: {loc: randLocTypes(multiPoint)}};
+ else
+ doc = {locs: randLocTypes(multiPoint, "loc")};
- // exact
- print("Exact query...");
- assert.eq(
- results.exact.docsIn,
- t.find({"locs.loc": randLocType(query.exact), "exact.docIn": randYesQuery()}).count());
+ randQueryAdditions(doc, indResults);
- // $center
- print("Center query...");
- print("Min box : " + minBoxSize(env, query.radius));
+ doc._id = i;
+ bulk.insert(doc);
+ }
+ assert.writeOK(bulk.execute());
+
+ var indexDoc = {"locs.loc": "2d"};
+ randIndexAdditions(indexDoc);
+
+ // "earth" is used to drive test setup and not a valid createIndexes option or required at
+ // this point. It must be removed before calling ensureIndexes().
+ delete env.earth;
+
+ assert.commandWorked(t.ensureIndex(indexDoc, env));
+ assert.isnull(db.getLastError());
+
+ var padding = "x";
+ for (var i = 0; i < paddingSize; i++)
+ padding = padding + padding;
+
+ print(padding);
+
+ printjson({
+ seed: seed,
+ test: test,
+ env: env,
+ query: query,
+ data: data,
+ results: results,
+ paddingSize: paddingSize
+ });
+
+ // exact
+ print("Exact query...");
+ assert.eq(
+ results.exact.docsIn,
+ t.find({"locs.loc": randLocType(query.exact), "exact.docIn": randYesQuery()}).count());
+
+ // $center
+ print("Center query...");
+ print("Min box : " + minBoxSize(env, query.radius));
+ assert.eq(results.center.docsIn,
+ t.find({
+ "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: 1}},
+ "center.docIn": randYesQuery()
+ }).count());
+
+ print("Center query update...");
+ var res = t.update({
+ "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: true}},
+ "center.docIn": randYesQuery()
+ },
+ {$set: {centerPaddingA: padding}},
+ false,
+ true);
+ assert.eq(results.center.docsIn, res.nModified);
+
+ if (query.sphereRadius >= 0) {
+ print("Center sphere query...");
+ // $centerSphere
assert.eq(
- results.center.docsIn,
+ results.sphere.docsIn,
t.find({
- "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: 1}},
- "center.docIn": randYesQuery()
+ "locs.loc": {$within: {$centerSphere: [query.sphereCenter, query.sphereRadius]}},
+ "sphere.docIn": randYesQuery()
}).count());
- print("Center query update...");
- var res = t.update({
- "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: true}},
- "center.docIn": randYesQuery()
- },
- {$set: {centerPaddingA: padding}},
- false,
- true);
- assert.eq(results.center.docsIn, res.nModified);
-
- if (query.sphereRadius >= 0) {
- print("Center sphere query...");
- // $centerSphere
- assert.eq(results.sphere.docsIn,
- t.find({
- "locs.loc":
- {$within: {$centerSphere: [query.sphereCenter, query.sphereRadius]}},
- "sphere.docIn": randYesQuery()
- }).count());
-
- print("Center sphere query update...");
- res = t.update({
- "locs.loc": {
- $within: {
- $centerSphere: [query.sphereCenter, query.sphereRadius],
- $uniqueDocs: true
- }
- },
- "sphere.docIn": randYesQuery()
+ print("Center sphere query update...");
+ res = t.update({
+ "locs.loc": {
+ $within:
+ {$centerSphere: [query.sphereCenter, query.sphereRadius], $uniqueDocs: true}
},
- {$set: {spherePaddingA: padding}},
- false,
- true);
- assert.eq(results.sphere.docsIn, res.nModified);
- }
+ "sphere.docIn": randYesQuery()
+ },
+ {$set: {spherePaddingA: padding}},
+ false,
+ true);
+ assert.eq(results.sphere.docsIn, res.nModified);
+ }
- // $box
- print("Box query...");
- assert.eq(results.box.docsIn,
- t.find({
- "locs.loc": {$within: {$box: query.box, $uniqueDocs: true}},
- "box.docIn": randYesQuery()
- }).count());
-
- // $polygon
- print("Polygon query...");
- assert.eq(results.poly.docsIn, t.find({
- "locs.loc": {$within: {$polygon: query.boxPoly}},
- "poly.docIn": randYesQuery()
- }).count());
-
- // $near
- print("Near query...");
+ // $box
+ print("Box query...");
+ assert.eq(results.box.docsIn, t.find({
+ "locs.loc": {$within: {$box: query.box, $uniqueDocs: true}},
+ "box.docIn": randYesQuery()
+ }).count());
+
+ // $polygon
+ print("Polygon query...");
+ assert.eq(results.poly.docsIn, t.find({
+ "locs.loc": {$within: {$polygon: query.boxPoly}},
+ "poly.docIn": randYesQuery()
+ }).count());
+
+ // $near
+ print("Near query...");
+ assert.eq(results.center.docsIn,
+ t.find({"locs.loc": {$near: query.center, $maxDistance: query.radius}}).count(true),
+ "Near query: center: " + query.center + "; radius: " + query.radius +
+ "; docs: " + results.center.docsIn + "; locs: " + results.center.locsIn);
+
+ if (query.sphereRadius >= 0) {
+ print("Near sphere query...");
+ // $centerSphere
assert.eq(
- results.center.docsIn,
- t.find({"locs.loc": {$near: query.center, $maxDistance: query.radius}}).count(true),
- "Near query: center: " + query.center + "; radius: " + query.radius + "; docs: " +
- results.center.docsIn + "; locs: " + results.center.locsIn);
-
- if (query.sphereRadius >= 0) {
- print("Near sphere query...");
- // $centerSphere
- assert.eq(results.sphere.docsIn,
- t.find({
- "locs.loc":
- {$nearSphere: query.sphereCenter, $maxDistance: query.sphereRadius}
- }).count(true),
- "Near sphere query: sphere center: " + query.sphereCenter + "; radius: " +
- query.sphereRadius + "; docs: " + results.sphere.docsIn + "; locs: " +
- results.sphere.locsIn);
- }
+ results.sphere.docsIn,
+ t.find({
+ "locs.loc": {$nearSphere: query.sphereCenter, $maxDistance: query.sphereRadius}
+ }).count(true),
+ "Near sphere query: sphere center: " + query.sphereCenter +
+ "; radius: " + query.sphereRadius + "; docs: " + results.sphere.docsIn +
+ "; locs: " + results.sphere.locsIn);
+ }
- // $geoNear aggregation stage.
- const aggregationLimit = 2 * results.center.docsIn;
- if (aggregationLimit > 0) {
- var output = t.aggregate([
- {
- $geoNear: {
- near: query.center,
- maxDistance: query.radius,
- includeLocs: "pt",
- distanceField: "dis",
- }
- },
- {$limit: aggregationLimit}
- ]).toArray();
-
- const errmsg = {
- limit: aggregationLimit,
- center: query.center,
- radius: query.radius,
- docs: results.center.docsIn,
- locs: results.center.locsIn,
- actualResult: output
- };
- assert.eq(results.center.docsIn, output.length, tojson(errmsg));
-
- let lastDistance = 0;
- for (var i = 0; i < output.length; i++) {
- var retDistance = output[i].dis;
- assert.close(retDistance, Geo.distance(locArray(query.center), output[i].pt));
- assert.lte(retDistance, query.radius);
- assert.gte(retDistance, lastDistance);
- lastDistance = retDistance;
- }
+ // $geoNear aggregation stage.
+ const aggregationLimit = 2 * results.center.docsIn;
+ if (aggregationLimit > 0) {
+ var output = t.aggregate([
+ {
+ $geoNear: {
+ near: query.center,
+ maxDistance: query.radius,
+ includeLocs: "pt",
+ distanceField: "dis",
+ }
+ },
+ {$limit: aggregationLimit}
+ ]).toArray();
+
+ const errmsg = {
+ limit: aggregationLimit,
+ center: query.center,
+ radius: query.radius,
+ docs: results.center.docsIn,
+ locs: results.center.locsIn,
+ actualResult: output
+ };
+ assert.eq(results.center.docsIn, output.length, tojson(errmsg));
+
+ let lastDistance = 0;
+ for (var i = 0; i < output.length; i++) {
+ var retDistance = output[i].dis;
+ assert.close(retDistance, Geo.distance(locArray(query.center), output[i].pt));
+ assert.lte(retDistance, query.radius);
+ assert.gte(retDistance, lastDistance);
+ lastDistance = retDistance;
}
-
- // $polygon
- print("Polygon remove...");
- res = t.remove(
- {"locs.loc": {$within: {$polygon: query.boxPoly}}, "poly.docIn": randYesQuery()});
- assert.eq(results.poly.docsIn, res.nRemoved);
}
- MongoRunner.stopMongod(conn);
+ // $polygon
+ print("Polygon remove...");
+ res =
+ t.remove({"locs.loc": {$within: {$polygon: query.boxPoly}}, "poly.docIn": randYesQuery()});
+ assert.eq(results.poly.docsIn, res.nRemoved);
+}
+
+MongoRunner.stopMongod(conn);
})();
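
The bucket arithmetic in minBucketScale() and minBoxSize() above amounts to finding the smallest power-of-two multiple of env.bucketSize that covers the query box. A minimal standalone sketch of that relationship, assuming a scalar extent instead of the box/array handling in the test (the Sketch-suffixed names are illustrative, not part of the test):

var minBucketScaleSketch = function(bucketSize, extent) {
    // Smallest integer s such that bucketSize * 2^s >= extent.
    return Math.ceil(Math.log(extent / bucketSize) / Math.log(2));
};
var minBoxSizeSketch = function(bucketSize, extent) {
    return bucketSize * Math.pow(2, minBucketScaleSketch(bucketSize, extent));
};
print(minBoxSizeSketch(1, 10));  // 16: the first power-of-two bucket multiple covering 10
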
diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js
index 6eb52933161..9f402db0d16 100644
--- a/jstests/noPassthrough/geo_mnypts_plus_fields.js
+++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js
@@ -1,107 +1,103 @@
// Test sanity of geo queries with a lot of points
(function() {
- "use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- const db = conn.getDB("test");
+"use strict";
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+const db = conn.getDB("test");
- var maxFields = 3;
+var maxFields = 3;
- for (var fields = 1; fields < maxFields; fields++) {
- var coll = db.testMnyPts;
- coll.drop();
+for (var fields = 1; fields < maxFields; fields++) {
+ var coll = db.testMnyPts;
+ coll.drop();
- var totalPts = 500 * 1000;
+ var totalPts = 500 * 1000;
- var bulk = coll.initializeUnorderedBulkOp();
- // Add points in a 100x100 grid
- for (var i = 0; i < totalPts; i++) {
- var ii = i % 10000;
+ var bulk = coll.initializeUnorderedBulkOp();
+ // Add points in a 100x100 grid
+ for (var i = 0; i < totalPts; i++) {
+ var ii = i % 10000;
- var doc = {loc: [ii % 100, Math.floor(ii / 100)]};
+ var doc = {loc: [ii % 100, Math.floor(ii / 100)]};
- // Add fields with different kinds of data
- for (var j = 0; j < fields; j++) {
- var field = null;
-
- if (j % 3 == 0) {
- // Make half the points not searchable
- field = "abcdefg" + (i % 2 == 0 ? "h" : "");
- } else if (j % 3 == 1) {
- field = new Date();
- } else {
- field = true;
- }
-
- doc["field" + j] = field;
- }
-
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
-
- // Create the query for the additional fields
- const queryFields = {};
+ // Add fields with different kinds of data
for (var j = 0; j < fields; j++) {
var field = null;
if (j % 3 == 0) {
- field = "abcdefg";
+ // Make half the points not searchable
+ field = "abcdefg" + (i % 2 == 0 ? "h" : "");
} else if (j % 3 == 1) {
- field = {$lte: new Date()};
+ field = new Date();
} else {
field = true;
}
- queryFields["field" + j] = field;
+ doc["field" + j] = field;
}
- coll.ensureIndex({loc: "2d"});
-
- // Check that quarter of points in each quadrant
- for (var i = 0; i < 4; i++) {
- var x = i % 2;
- var y = Math.floor(i / 2);
-
- var box = [[0, 0], [49, 49]];
- box[0][0] += (x == 1 ? 50 : 0);
- box[1][0] += (x == 1 ? 50 : 0);
- box[0][1] += (y == 1 ? 50 : 0);
- box[1][1] += (y == 1 ? 50 : 0);
-
- // Now only half of each result comes back
- assert.eq(totalPts / (4 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
- assert.eq(
- totalPts / (4 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
+ bulk.insert(doc);
+ }
+ assert.writeOK(bulk.execute());
+
+ // Create the query for the additional fields
+ const queryFields = {};
+ for (var j = 0; j < fields; j++) {
+ var field = null;
+
+ if (j % 3 == 0) {
+ field = "abcdefg";
+ } else if (j % 3 == 1) {
+ field = {$lte: new Date()};
+ } else {
+ field = true;
}
- // Check that half of points in each half
- for (var i = 0; i < 2; i++) {
- var box = [[0, 0], [49, 99]];
- box[0][0] += (i == 1 ? 50 : 0);
- box[1][0] += (i == 1 ? 50 : 0);
-
- assert.eq(totalPts / (2 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
- assert.eq(
- totalPts / (2 * 2),
- coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
- }
+ queryFields["field" + j] = field;
+ }
+
+ coll.ensureIndex({loc: "2d"});
+
+    // Check that a quarter of the points lies in each quadrant
+ for (var i = 0; i < 4; i++) {
+ var x = i % 2;
+ var y = Math.floor(i / 2);
+
+ var box = [[0, 0], [49, 49]];
+ box[0][0] += (x == 1 ? 50 : 0);
+ box[1][0] += (x == 1 ? 50 : 0);
+ box[0][1] += (y == 1 ? 50 : 0);
+ box[1][1] += (y == 1 ? 50 : 0);
- // Check that all but corner set of points in radius
- var circle = [[0, 0], (100 - 1) * Math.sqrt(2) - 0.25];
+ // Now only half of each result comes back
+ assert.eq(totalPts / (4 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
+ assert.eq(totalPts / (4 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
+ }
+
+    // Check that half of the points lies in each half
+ for (var i = 0; i < 2; i++) {
+ var box = [[0, 0], [49, 99]];
+ box[0][0] += (i == 1 ? 50 : 0);
+ box[1][0] += (i == 1 ? 50 : 0);
- // All [99,x] pts are field0 : "abcdefg"
- assert.eq(
- totalPts / 2 - totalPts / (100 * 100),
- coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).count());
- assert.eq(
- totalPts / 2 - totalPts / (100 * 100),
- coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).itcount());
+ assert.eq(totalPts / (2 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
+ assert.eq(totalPts / (2 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
}
- MongoRunner.stopMongod(conn);
+    // Check that all but the corner set of points lie within the radius
+ var circle = [[0, 0], (100 - 1) * Math.sqrt(2) - 0.25];
+
+ // All [99,x] pts are field0 : "abcdefg"
+ assert.eq(totalPts / 2 - totalPts / (100 * 100),
+ coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).count());
+ assert.eq(totalPts / 2 - totalPts / (100 * 100),
+ coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).itcount());
+}
+
+MongoRunner.stopMongod(conn);
})();
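
The quadrant assertions above depend on the points forming a 100x100 grid, so each quadrant is a 50x50 $box containing a quarter of the points, half of which survive the extra field filters. A minimal sketch of the box construction used in that loop, with an illustrative quadrant index:

var quadrantBox = function(i) {
    // i in [0, 4): bit 0 selects the x half, bit 1 selects the y half.
    var x = i % 2;
    var y = Math.floor(i / 2);
    var box = [[0, 0], [49, 49]];
    box[0][0] += (x == 1 ? 50 : 0);
    box[1][0] += (x == 1 ? 50 : 0);
    box[0][1] += (y == 1 ? 50 : 0);
    box[1][1] += (y == 1 ? 50 : 0);
    return box;
};
printjson(quadrantBox(3));  // [[50, 50], [99, 99]], the upper-right quadrant
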
diff --git a/jstests/noPassthrough/geo_near_random1.js b/jstests/noPassthrough/geo_near_random1.js
index 9c59e21c9a0..06dcf86c819 100644
--- a/jstests/noPassthrough/geo_near_random1.js
+++ b/jstests/noPassthrough/geo_near_random1.js
@@ -1,22 +1,22 @@
// this tests all points using $near
var db;
(function() {
- "use strict";
- load("jstests/libs/geo_near_random.js");
+"use strict";
+load("jstests/libs/geo_near_random.js");
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- db = conn.getDB("test");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+db = conn.getDB("test");
- var test = new GeoNearRandomTest("weekly.geo_near_random1");
+var test = new GeoNearRandomTest("weekly.geo_near_random1");
- test.insertPts(1000);
+test.insertPts(1000);
- test.testPt([0, 0]);
- test.testPt(test.mkPt());
- test.testPt(test.mkPt());
- test.testPt(test.mkPt());
- test.testPt(test.mkPt());
+test.testPt([0, 0]);
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
+test.testPt(test.mkPt());
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/geo_near_random2.js b/jstests/noPassthrough/geo_near_random2.js
index aa09ebac6ff..b5ec59af112 100644
--- a/jstests/noPassthrough/geo_near_random2.js
+++ b/jstests/noPassthrough/geo_near_random2.js
@@ -1,30 +1,33 @@
// this tests 1% of all points using $near and $nearSphere
var db;
(function() {
- "use strict";
- load("jstests/libs/geo_near_random.js");
+"use strict";
+load("jstests/libs/geo_near_random.js");
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- db = conn.getDB("test");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+db = conn.getDB("test");
- var test = new GeoNearRandomTest("weekly.geo_near_random2");
+var test = new GeoNearRandomTest("weekly.geo_near_random2");
- test.insertPts(50000);
+test.insertPts(50000);
- const opts = {sphere: 0, nToTest: test.nPts * 0.01};
- test.testPt([0, 0], opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
- test.testPt(test.mkPt(), opts);
+const opts = {
+ sphere: 0,
+ nToTest: test.nPts * 0.01
+};
+test.testPt([0, 0], opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
+test.testPt(test.mkPt(), opts);
- opts.sphere = 1;
- test.testPt([0, 0], opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
- test.testPt(test.mkPt(0.8), opts);
+opts.sphere = 1;
+test.testPt([0, 0], opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
+test.testPt(test.mkPt(0.8), opts);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/global_operation_latency_histogram.js b/jstests/noPassthrough/global_operation_latency_histogram.js
index 90d24903ef6..2f103e70a96 100644
--- a/jstests/noPassthrough/global_operation_latency_histogram.js
+++ b/jstests/noPassthrough/global_operation_latency_histogram.js
@@ -2,167 +2,166 @@
// @tags: [requires_replication]
(function() {
- "use strict";
- var name = "operationalLatencyHistogramTest";
-
- var mongo = MongoRunner.runMongod();
- var testDB = mongo.getDB("test");
- var testColl = testDB[name + "coll"];
-
- testColl.drop();
-
- function getHistogramStats() {
- return testDB.serverStatus({opLatencies: {histograms: 1}}).opLatencies;
- }
-
- var lastHistogram = getHistogramStats();
-
- // Checks that the difference in the histogram is what we expect, and also
- // accounts for the serverStatus command itself.
- function checkHistogramDiff(reads, writes, commands) {
- var thisHistogram = getHistogramStats();
- assert.eq(thisHistogram.reads.ops - lastHistogram.reads.ops, reads);
- assert.eq(thisHistogram.writes.ops - lastHistogram.writes.ops, writes);
- // Running the server status itself will increment command stats by one.
- assert.eq(thisHistogram.commands.ops - lastHistogram.commands.ops, commands + 1);
- return thisHistogram;
- }
-
- // Insert
- var numRecords = 100;
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.insert({_id: i}));
- }
- lastHistogram = checkHistogramDiff(0, numRecords, 0);
-
- // Update
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}));
- }
- lastHistogram = checkHistogramDiff(0, numRecords, 0);
-
- // Find
- var cursors = [];
- for (var i = 0; i < numRecords; i++) {
- cursors[i] = testColl.find({x: {$gte: i}}).batchSize(2);
- assert.eq(cursors[i].next()._id, i);
- }
- lastHistogram = checkHistogramDiff(numRecords, 0, 0);
-
- // GetMore
- for (var i = 0; i < numRecords / 2; i++) {
- // Trigger two getmore commands.
- assert.eq(cursors[i].next()._id, i + 1);
- assert.eq(cursors[i].next()._id, i + 2);
- assert.eq(cursors[i].next()._id, i + 3);
- assert.eq(cursors[i].next()._id, i + 4);
- }
- lastHistogram = checkHistogramDiff(numRecords, 0, 0);
-
- // KillCursors
- // The last cursor has no additional results, hence does not need to be closed.
- for (var i = 0; i < numRecords - 1; i++) {
- cursors[i].close();
- }
- lastHistogram = checkHistogramDiff(0, 0, numRecords - 1);
-
- // Remove
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.remove({_id: i}));
- }
- lastHistogram = checkHistogramDiff(0, numRecords, 0);
-
- // Upsert
- for (var i = 0; i < numRecords; i++) {
- assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
- }
- lastHistogram = checkHistogramDiff(0, numRecords, 0);
-
- // Aggregate
- for (var i = 0; i < numRecords; i++) {
- testColl.aggregate([{$match: {x: i}}, {$group: {_id: "$x"}}]);
- }
- lastHistogram = checkHistogramDiff(numRecords, 0, 0);
-
- // Count
- for (var i = 0; i < numRecords; i++) {
- testColl.count({x: i});
- }
- lastHistogram = checkHistogramDiff(numRecords, 0, 0);
-
- // FindAndModify
- testColl.findAndModify({query: {}, update: {pt: {type: "Point", coordinates: [0, 0]}}});
- lastHistogram = checkHistogramDiff(0, 1, 0);
-
- // CreateIndex
- assert.commandWorked(testColl.createIndex({pt: "2dsphere"}));
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // $geoNear aggregation stage
- assert.commandWorked(testDB.runCommand({
- aggregate: testColl.getName(),
- pipeline: [{
- $geoNear: {
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- distanceField: "dist",
- }
- }],
- cursor: {},
- }));
- lastHistogram = checkHistogramDiff(1, 0, 0);
-
- // GetIndexes
- testColl.getIndexes();
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // Reindex
- assert.commandWorked(testColl.reIndex());
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // DropIndex
- assert.commandWorked(testColl.dropIndex({pt: "2dsphere"}));
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // Explain
- testColl.explain().find().next();
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // CollStats
- assert.commandWorked(testDB.runCommand({collStats: testColl.getName()}));
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // CollMod
- assert.commandWorked(
- testDB.runCommand({collStats: testColl.getName(), validationLevel: "off"}));
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // Compact
- var commandResult = testDB.runCommand({compact: testColl.getName()});
- // If storage engine supports compact, it should count as a command.
- if (!commandResult.ok) {
- assert.commandFailedWithCode(commandResult, ErrorCodes.CommandNotSupported);
- }
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // DataSize
- testColl.dataSize();
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // PlanCache
- testColl.getPlanCache().listQueryShapes();
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // ServerStatus
- assert.commandWorked(testDB.serverStatus());
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // WhatsMyURI
- assert.commandWorked(testColl.runCommand("whatsmyuri"));
- lastHistogram = checkHistogramDiff(0, 0, 1);
-
- // Test non-command.
- assert.commandFailed(testColl.runCommand("IHopeNobodyEverMakesThisACommand"));
- lastHistogram = checkHistogramDiff(0, 0, 1);
- MongoRunner.stopMongod(mongo);
+"use strict";
+var name = "operationalLatencyHistogramTest";
+
+var mongo = MongoRunner.runMongod();
+var testDB = mongo.getDB("test");
+var testColl = testDB[name + "coll"];
+
+testColl.drop();
+
+function getHistogramStats() {
+ return testDB.serverStatus({opLatencies: {histograms: 1}}).opLatencies;
+}
+
+var lastHistogram = getHistogramStats();
+
+// Checks that the difference in the histogram is what we expect, and also
+// accounts for the serverStatus command itself.
+function checkHistogramDiff(reads, writes, commands) {
+ var thisHistogram = getHistogramStats();
+ assert.eq(thisHistogram.reads.ops - lastHistogram.reads.ops, reads);
+ assert.eq(thisHistogram.writes.ops - lastHistogram.writes.ops, writes);
+ // Running the server status itself will increment command stats by one.
+ assert.eq(thisHistogram.commands.ops - lastHistogram.commands.ops, commands + 1);
+ return thisHistogram;
+}
+
+// Insert
+var numRecords = 100;
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.insert({_id: i}));
+}
+lastHistogram = checkHistogramDiff(0, numRecords, 0);
+
+// Update
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.update({_id: i}, {x: i}));
+}
+lastHistogram = checkHistogramDiff(0, numRecords, 0);
+
+// Find
+var cursors = [];
+for (var i = 0; i < numRecords; i++) {
+ cursors[i] = testColl.find({x: {$gte: i}}).batchSize(2);
+ assert.eq(cursors[i].next()._id, i);
+}
+lastHistogram = checkHistogramDiff(numRecords, 0, 0);
+
+// GetMore
+for (var i = 0; i < numRecords / 2; i++) {
+ // Trigger two getmore commands.
+ assert.eq(cursors[i].next()._id, i + 1);
+ assert.eq(cursors[i].next()._id, i + 2);
+ assert.eq(cursors[i].next()._id, i + 3);
+ assert.eq(cursors[i].next()._id, i + 4);
+}
+lastHistogram = checkHistogramDiff(numRecords, 0, 0);
+
+// KillCursors
+// The last cursor has no additional results, hence does not need to be closed.
+for (var i = 0; i < numRecords - 1; i++) {
+ cursors[i].close();
+}
+lastHistogram = checkHistogramDiff(0, 0, numRecords - 1);
+
+// Remove
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.remove({_id: i}));
+}
+lastHistogram = checkHistogramDiff(0, numRecords, 0);
+
+// Upsert
+for (var i = 0; i < numRecords; i++) {
+ assert.writeOK(testColl.update({_id: i}, {x: i}, {upsert: 1}));
+}
+lastHistogram = checkHistogramDiff(0, numRecords, 0);
+
+// Aggregate
+for (var i = 0; i < numRecords; i++) {
+ testColl.aggregate([{$match: {x: i}}, {$group: {_id: "$x"}}]);
+}
+lastHistogram = checkHistogramDiff(numRecords, 0, 0);
+
+// Count
+for (var i = 0; i < numRecords; i++) {
+ testColl.count({x: i});
+}
+lastHistogram = checkHistogramDiff(numRecords, 0, 0);
+
+// FindAndModify
+testColl.findAndModify({query: {}, update: {pt: {type: "Point", coordinates: [0, 0]}}});
+lastHistogram = checkHistogramDiff(0, 1, 0);
+
+// CreateIndex
+assert.commandWorked(testColl.createIndex({pt: "2dsphere"}));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// $geoNear aggregation stage
+assert.commandWorked(testDB.runCommand({
+ aggregate: testColl.getName(),
+ pipeline: [{
+ $geoNear: {
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ distanceField: "dist",
+ }
+ }],
+ cursor: {},
+}));
+lastHistogram = checkHistogramDiff(1, 0, 0);
+
+// GetIndexes
+testColl.getIndexes();
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// Reindex
+assert.commandWorked(testColl.reIndex());
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// DropIndex
+assert.commandWorked(testColl.dropIndex({pt: "2dsphere"}));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// Explain
+testColl.explain().find().next();
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// CollStats
+assert.commandWorked(testDB.runCommand({collStats: testColl.getName()}));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// CollMod
+assert.commandWorked(testDB.runCommand({collStats: testColl.getName(), validationLevel: "off"}));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// Compact
+var commandResult = testDB.runCommand({compact: testColl.getName()});
+// If storage engine supports compact, it should count as a command.
+if (!commandResult.ok) {
+ assert.commandFailedWithCode(commandResult, ErrorCodes.CommandNotSupported);
+}
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// DataSize
+testColl.dataSize();
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// PlanCache
+testColl.getPlanCache().listQueryShapes();
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// ServerStatus
+assert.commandWorked(testDB.serverStatus());
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// WhatsMyURI
+assert.commandWorked(testColl.runCommand("whatsmyuri"));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+
+// Test non-command.
+assert.commandFailed(testColl.runCommand("IHopeNobodyEverMakesThisACommand"));
+lastHistogram = checkHistogramDiff(0, 0, 1);
+MongoRunner.stopMongod(mongo);
}());
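
checkHistogramDiff() above compares two opLatencies snapshots and adds one to the expected command count because the serverStatus call that produces the second snapshot is itself a command. A minimal sketch of that delta check using hypothetical snapshot values rather than live serverStatus output:

var last = {reads: {ops: 10}, writes: {ops: 5}, commands: {ops: 7}};
var curr = {reads: {ops: 10}, writes: {ops: 105}, commands: {ops: 8}};
// 100 writes ran between the snapshots; the only extra command is serverStatus itself.
assert.eq(curr.writes.ops - last.writes.ops, 100);
assert.eq(curr.commands.ops - last.commands.ops, 0 + 1);
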
diff --git a/jstests/noPassthrough/global_transaction_latency_histogram.js b/jstests/noPassthrough/global_transaction_latency_histogram.js
index 16bba6fb313..56e8a2ca4c9 100644
--- a/jstests/noPassthrough/global_transaction_latency_histogram.js
+++ b/jstests/noPassthrough/global_transaction_latency_histogram.js
@@ -1,120 +1,121 @@
// Checks that the global histogram counter for transactions are updated as we expect.
// @tags: [requires_replication, uses_transactions]
(function() {
- "use strict";
-
- // Set up the replica set.
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
-
- // Set up the test database.
- const dbName = "test";
- const collName = "global_transaction_latency_histogram";
-
- const testDB = primary.getDB(dbName);
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Start the session.
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- function getHistogramStats() {
- return testDB.serverStatus({opLatencies: {histograms: 1}}).opLatencies;
- }
-
- // Checks that the actual value is within a minimum on the bound of the expected value. All
- // arguments must be in the same units.
- function assertLowerBound(expected, actual, bound) {
- assert.gte(actual, expected - bound);
- }
-
- // This function checks the diff between the last histogram and the current histogram, not the
- // absolute values.
- function checkHistogramDiff(lastHistogram, thisHistogram, fields) {
- for (let key in fields) {
- if (fields.hasOwnProperty(key)) {
- assert.eq(thisHistogram[key].ops - lastHistogram[key].ops, fields[key]);
- }
+"use strict";
+
+// Set up the replica set.
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
+
+// Set up the test database.
+const dbName = "test";
+const collName = "global_transaction_latency_histogram";
+
+const testDB = primary.getDB(dbName);
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Start the session.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+function getHistogramStats() {
+ return testDB.serverStatus({opLatencies: {histograms: 1}}).opLatencies;
+}
+
+// Checks that the actual value is no more than 'bound' below the expected value. All
+// arguments must be in the same units.
+function assertLowerBound(expected, actual, bound) {
+ assert.gte(actual, expected - bound);
+}
+
+// This function checks the diff between the last histogram and the current histogram, not the
+// absolute values.
+function checkHistogramDiff(lastHistogram, thisHistogram, fields) {
+ for (let key in fields) {
+ if (fields.hasOwnProperty(key)) {
+ assert.eq(thisHistogram[key].ops - lastHistogram[key].ops, fields[key]);
}
- return thisHistogram;
}
-
- // This function checks the diff between the last histogram's accumulated transactions latency
- // and this histogram's accumulated transactions latency is within a reasonable bound of what
- // we expect.
- function checkHistogramLatencyDiff(lastHistogram, thisHistogram, sleepTime) {
- let latencyDiff = thisHistogram.transactions.latency - lastHistogram.transactions.latency;
- // Check the bound in microseconds, which is the unit the latency is in. We do not check
- // upper bound because of unknown extra server latency.
- assertLowerBound(sleepTime * 1000, latencyDiff, 50000);
- return thisHistogram;
- }
-
- let lastHistogram = getHistogramStats();
-
- // Verify the base stats are correct.
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 0, "writes": 0, "commands": 1, "transactions": 0});
-
- // Test histogram increments on a successful transaction. "commitTransaction" and "serverStatus"
- // commands are counted towards the "commands" counter.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
- assert.commandWorked(session.commitTransaction_forTesting());
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 0, "writes": 1, "commands": 2, "transactions": 1});
-
- // Test histogram increments on aborted transaction due to error (duplicate insert).
+ return thisHistogram;
+}
+
+// This function checks the diff between the last histogram's accumulated transactions latency
+// and this histogram's accumulated transactions latency is within a reasonable bound of what
+// we expect.
+function checkHistogramLatencyDiff(lastHistogram, thisHistogram, sleepTime) {
+ let latencyDiff = thisHistogram.transactions.latency - lastHistogram.transactions.latency;
+ // Check the bound in microseconds, which is the unit the latency is in. We do not check
+ // upper bound because of unknown extra server latency.
+ assertLowerBound(sleepTime * 1000, latencyDiff, 50000);
+ return thisHistogram;
+}
+
+let lastHistogram = getHistogramStats();
+
+// Verify the base stats are correct.
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 0, "writes": 0, "commands": 1, "transactions": 0});
+
+// Test histogram increments on a successful transaction. "commitTransaction" and "serverStatus"
+// commands are counted towards the "commands" counter.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
+assert.commandWorked(session.commitTransaction_forTesting());
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 0, "writes": 1, "commands": 2, "transactions": 1});
+
+// Test histogram increments on aborted transaction due to error (duplicate insert).
+session.startTransaction();
+assert.commandFailedWithCode(sessionColl.insert({_id: "insert-1"}), ErrorCodes.DuplicateKey);
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 0, "writes": 1, "commands": 1, "transactions": 1});
+
+// Ensure that the transaction was aborted on failure.
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 0, "writes": 0, "commands": 2, "transactions": 0});
+
+// Test histogram increments on an aborted transaction. "abortTransaction" command is counted
+// towards the "commands" counter.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
+assert.commandWorked(session.abortTransaction_forTesting());
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 0, "writes": 1, "commands": 2, "transactions": 1});
+
+// Test histogram increments on a multi-statement committed transaction.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
+assert.commandWorked(sessionColl.insert({_id: "insert-4"}));
+assert.eq(sessionColl.find({_id: "insert-1"}).itcount(), 1);
+assert.commandWorked(session.commitTransaction_forTesting());
+lastHistogram = checkHistogramDiff(lastHistogram,
+ getHistogramStats(),
+ {"reads": 1, "writes": 2, "commands": 2, "transactions": 1});
+
+// Test that the cumulative transaction latency counter is updated appropriately after a
+// sequence of back-to-back 200 ms transactions.
+const sleepTime = 200;
+for (let i = 0; i < 3; i++) {
session.startTransaction();
- assert.commandFailedWithCode(sessionColl.insert({_id: "insert-1"}), ErrorCodes.DuplicateKey);
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 0, "writes": 1, "commands": 1, "transactions": 1});
-
- // Ensure that the transaction was aborted on failure.
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 0, "writes": 0, "commands": 2, "transactions": 0});
-
- // Test histogram increments on an aborted transaction. "abortTransaction" command is counted
- // towards the "commands" counter.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
- assert.commandWorked(session.abortTransaction_forTesting());
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 0, "writes": 1, "commands": 2, "transactions": 1});
-
- // Test histogram increments on a multi-statement committed transaction.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
- assert.commandWorked(sessionColl.insert({_id: "insert-4"}));
assert.eq(sessionColl.find({_id: "insert-1"}).itcount(), 1);
+ sleep(sleepTime);
assert.commandWorked(session.commitTransaction_forTesting());
- lastHistogram = checkHistogramDiff(lastHistogram,
- getHistogramStats(),
- {"reads": 1, "writes": 2, "commands": 2, "transactions": 1});
-
- // Test that the cumulative transaction latency counter is updated appropriately after a
- // sequence of back-to-back 200 ms transactions.
- const sleepTime = 200;
- for (let i = 0; i < 3; i++) {
- session.startTransaction();
- assert.eq(sessionColl.find({_id: "insert-1"}).itcount(), 1);
- sleep(sleepTime);
- assert.commandWorked(session.commitTransaction_forTesting());
- lastHistogram = checkHistogramLatencyDiff(lastHistogram, getHistogramStats(), sleepTime);
- }
+ lastHistogram = checkHistogramLatencyDiff(lastHistogram, getHistogramStats(), sleepTime);
+}
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
}());
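
checkHistogramLatencyDiff() above mixes units: sleepTime is in milliseconds while opLatencies reports transactions.latency in microseconds, and only a lower bound is asserted because server overhead can add, but never remove, latency. A minimal sketch with an assumed measured delta:

var sleepTimeMillis = 200;
var latencyDiffMicros = 230000;  // hypothetical delta between two snapshots
// Expected floor: the sleep converted to microseconds, minus the 50 ms (50000 us) allowance.
assert.gte(latencyDiffMicros, sleepTimeMillis * 1000 - 50000);
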
diff --git a/jstests/noPassthrough/hostname_bind_ips.js b/jstests/noPassthrough/hostname_bind_ips.js
index d7d87e99ec5..1de16bde73e 100644
--- a/jstests/noPassthrough/hostname_bind_ips.js
+++ b/jstests/noPassthrough/hostname_bind_ips.js
@@ -2,20 +2,20 @@
// binding to localhost and enabling IPv6.
(function() {
- 'use strict';
+'use strict';
- const proc = MongoRunner.runMongod({bind_ip: "localhost", "ipv6": ""});
- assert.neq(proc, null);
+const proc = MongoRunner.runMongod({bind_ip: "localhost", "ipv6": ""});
+assert.neq(proc, null);
- assert.soon(function() {
- try {
- const uri = 'mongodb://127.0.0.1:' + proc.port + '/test';
- const conn = new Mongo(uri);
- assert.commandWorked(conn.adminCommand({ping: 1}));
- return true;
- } catch (e) {
- return false;
- }
- }, "Cannot connect to 127.0.0.1 when bound to localhost", 30 * 1000);
- MongoRunner.stopMongod(proc);
+assert.soon(function() {
+ try {
+ const uri = 'mongodb://127.0.0.1:' + proc.port + '/test';
+ const conn = new Mongo(uri);
+ assert.commandWorked(conn.adminCommand({ping: 1}));
+ return true;
+ } catch (e) {
+ return false;
+ }
+}, "Cannot connect to 127.0.0.1 when bound to localhost", 30 * 1000);
+MongoRunner.stopMongod(proc);
})();
diff --git a/jstests/noPassthrough/http_client_keep_alive.js b/jstests/noPassthrough/http_client_keep_alive.js
index a8d802d929e..689231dbe03 100644
--- a/jstests/noPassthrough/http_client_keep_alive.js
+++ b/jstests/noPassthrough/http_client_keep_alive.js
@@ -2,61 +2,59 @@
// @tags: [requires_http_client]
(function() {
- 'use strict';
-
- load('jstests/noPassthrough/libs/configExpand/lib.js');
-
- function runTest(mongod, web) {
- assert(mongod);
- const admin = mongod.getDB('admin');
-
- // Only bother with this test when using curl >= 7.57.0.
- const http_status = admin.adminCommand({serverStatus: 1, http_client: 1});
- const http_client = assert.commandWorked(http_status).http_client;
- if (http_client.type !== 'curl') {
- print("*** Skipping test, not using curl");
- return;
- }
-
- printjson(http_client);
- if (http_client.running.version_num < 0x73900) {
- // 39 hex == 57 dec, so 0x73900 == 7.57.0
- print(
- "*** Skipping test, curl < 7.57.0 does not support connection pooling via share interface");
- return;
- }
-
- // Issue a series of requests to the mock server.
- for (let i = 0; i < 10; ++i) {
- const cmd =
- admin.runCommand({httpClientRequest: 1, uri: web.getStringReflectionURL(i)});
- const reflect = assert.commandWorked(cmd).body;
- assert.eq(reflect, i, "Mock server reflected something unexpected.");
- }
-
- // Check connect count.
- const countCmd =
- admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connect_count'});
- const count = assert.commandWorked(countCmd).body;
- assert.eq(count, 1, "Connections were not kept alive.");
-
- // Force the open connection to close.
- const closeCmd =
- admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connection_close'});
- const close = assert.commandWorked(closeCmd).body;
- assert.eq(close, 'closed');
-
- // Check count with new connection.
- const connectsCmd =
- admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connect_count'});
- const connects = assert.commandWorked(connectsCmd).body;
- assert.eq(connects, 2, "Connection count incorrect.");
+'use strict';
+
+load('jstests/noPassthrough/libs/configExpand/lib.js');
+
+function runTest(mongod, web) {
+ assert(mongod);
+ const admin = mongod.getDB('admin');
+
+ // Only bother with this test when using curl >= 7.57.0.
+ const http_status = admin.adminCommand({serverStatus: 1, http_client: 1});
+ const http_client = assert.commandWorked(http_status).http_client;
+ if (http_client.type !== 'curl') {
+ print("*** Skipping test, not using curl");
+ return;
+ }
+
+ printjson(http_client);
+ if (http_client.running.version_num < 0x73900) {
+ // 39 hex == 57 dec, so 0x73900 == 7.57.0
+ print(
+ "*** Skipping test, curl < 7.57.0 does not support connection pooling via share interface");
+ return;
+ }
+
+ // Issue a series of requests to the mock server.
+ for (let i = 0; i < 10; ++i) {
+ const cmd = admin.runCommand({httpClientRequest: 1, uri: web.getStringReflectionURL(i)});
+ const reflect = assert.commandWorked(cmd).body;
+ assert.eq(reflect, i, "Mock server reflected something unexpected.");
}
- const web = new ConfigExpandRestServer();
- web.start();
- const mongod = MongoRunner.runMongod({setParameter: 'enableTestCommands=1'});
- runTest(mongod, web);
- MongoRunner.stopMongod(mongod);
- web.stop();
+ // Check connect count.
+ const countCmd = admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connect_count'});
+ const count = assert.commandWorked(countCmd).body;
+ assert.eq(count, 1, "Connections were not kept alive.");
+
+ // Force the open connection to close.
+ const closeCmd =
+ admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connection_close'});
+ const close = assert.commandWorked(closeCmd).body;
+ assert.eq(close, 'closed');
+
+ // Check count with new connection.
+ const connectsCmd =
+ admin.runCommand({httpClientRequest: 1, uri: web.getURL() + '/connect_count'});
+ const connects = assert.commandWorked(connectsCmd).body;
+ assert.eq(connects, 2, "Connection count incorrect.");
+}
+
+const web = new ConfigExpandRestServer();
+web.start();
+const mongod = MongoRunner.runMongod({setParameter: 'enableTestCommands=1'});
+runTest(mongod, web);
+MongoRunner.stopMongod(mongod);
+web.stop();
})();
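
The version gate above works because libcurl packs its version number as one byte per component, so 7.57.0 encodes as 0x073900. A minimal sketch decoding that value:

var decodeCurlVersion = function(versionNum) {
    return [(versionNum >> 16) & 0xff, (versionNum >> 8) & 0xff, versionNum & 0xff].join('.');
};
print(decodeCurlVersion(0x73900));  // 7.57.0
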
diff --git a/jstests/noPassthrough/hybrid_geo_index_remove_invalid_doc.js b/jstests/noPassthrough/hybrid_geo_index_remove_invalid_doc.js
index a99533a8bbf..728f5566a5e 100644
--- a/jstests/noPassthrough/hybrid_geo_index_remove_invalid_doc.js
+++ b/jstests/noPassthrough/hybrid_geo_index_remove_invalid_doc.js
@@ -5,67 +5,67 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.commandWorked(testDB.createCollection(coll.getName()));
+assert.commandWorked(testDB.createCollection(coll.getName()));
- // Insert an invalid geo document that will be removed before the indexer starts a collecton
- // scan.
- assert.commandWorked(coll.insert({
- _id: 0,
- b: {type: 'invalid_geo_json_type', coordinates: [100, 100]},
- }));
+// Insert an invalid geo document that will be removed before the indexer starts a collection
+// scan.
+assert.commandWorked(coll.insert({
+ _id: 0,
+ b: {type: 'invalid_geo_json_type', coordinates: [100, 100]},
+}));
- // We are using this fail point to pause the index build before it starts the collection scan.
- // This is important for this test because we are mutating the collection state before the index
- // builder is able to observe the invalid geo document.
- // By comparison, IndexBuildTest.pauseIndexBuilds() stalls the index build in the middle of the
- // collection scan.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
+// We are using this fail point to pause the index build before it starts the collection scan.
+// This is important for this test because we are mutating the collection state before the index
+// builder is able to observe the invalid geo document.
+// By comparison, IndexBuildTest.pauseIndexBuilds() stalls the index build in the middle of the
+// collection scan.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
- const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {b: '2dsphere'});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {b: '2dsphere'});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- // Insert a valid geo document to initialize the hybrid index builder's side table state.
- assert.commandWorked(coll.insert({
- b: {type: 'Polygon', coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]},
- }));
+// Insert a valid geo document to initialize the hybrid index builder's side table state.
+assert.commandWorked(coll.insert({
+ b: {type: 'Polygon', coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]},
+}));
- // Removing the invalid geo document should not cause any issues for the side table accounting.
- assert.commandWorked(coll.remove({_id: 0}));
+// Removing the invalid geo document should not cause any issues for the side table accounting.
+assert.commandWorked(coll.remove({_id: 0}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
- // Wait for the index build to finish. Since the invalid geo document is removed before the
- // index build scans the collection, the index should be built successfully.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
+// Wait for the index build to finish. Since the invalid geo document is removed before the
+// index build scans the collection, the index should be built successfully.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
- let res = assert.commandWorked(coll.validate({full: true}));
- assert(res.valid, 'validation failed on primary: ' + tojson(res));
+let res = assert.commandWorked(coll.validate({full: true}));
+assert(res.valid, 'validation failed on primary: ' + tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/hybrid_geo_index_update_invalid_doc.js b/jstests/noPassthrough/hybrid_geo_index_update_invalid_doc.js
index 3492726334d..d01ad9f4e82 100644
--- a/jstests/noPassthrough/hybrid_geo_index_update_invalid_doc.js
+++ b/jstests/noPassthrough/hybrid_geo_index_update_invalid_doc.js
@@ -5,63 +5,63 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.commandWorked(testDB.createCollection(coll.getName()));
+assert.commandWorked(testDB.createCollection(coll.getName()));
- // Insert an invalid geo document that will be removed before the indexer starts a collecton
- // scan.
- assert.commandWorked(coll.insert({
- _id: 0,
- b: {type: 'invalid_geo_json_type', coordinates: [100, 100]},
- }));
+// Insert an invalid geo document that will be removed before the indexer starts a collection
+// scan.
+assert.commandWorked(coll.insert({
+ _id: 0,
+ b: {type: 'invalid_geo_json_type', coordinates: [100, 100]},
+}));
- // We are using this fail point to pause the index build before it starts the collection scan.
- // This is important for this test because we are mutating the collection state before the index
- // builder is able to observe the invalid geo document.
- // By comparison, IndexBuildTest.pauseIndexBuilds() stalls the index build in the middle of the
- // collection scan.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
+// We are using this fail point to pause the index build before it starts the collection scan.
+// This is important for this test because we are mutating the collection state before the index
+// builder is able to observe the invalid geo document.
+// By comparison, IndexBuildTest.pauseIndexBuilds() stalls the index build in the middle of the
+// collection scan.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'alwaysOn'}));
- const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {b: '2dsphere'});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {b: '2dsphere'});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- // Fixing the invalid geo document should not cause any issues for the side table accounting.
- assert.commandWorked(coll.update(
- {_id: 0}, {b: {type: 'Polygon', coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]}}));
+// Fixing the invalid geo document should not cause any issues for the side table accounting.
+assert.commandWorked(coll.update(
+ {_id: 0}, {b: {type: 'Polygon', coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]}}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangAfterSettingUpIndexBuild', mode: 'off'}));
- // Wait for the index build to finish. Since the invalid geo document is removed before the
- // index build scans the collection, the index should be built successfully.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
+// Wait for the index build to finish. Since the invalid geo document is removed before the
+// index build scans the collection, the index should be built successfully.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
- let res = assert.commandWorked(coll.validate({full: true}));
- assert(res.valid, 'validation failed on primary: ' + tojson(res));
+let res = assert.commandWorked(coll.validate({full: true}));
+assert(res.valid, 'validation failed on primary: ' + tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/hybrid_index_with_updates.js b/jstests/noPassthrough/hybrid_index_with_updates.js
index 869dce5b26c..3b9c2d89f7a 100644
--- a/jstests/noPassthrough/hybrid_index_with_updates.js
+++ b/jstests/noPassthrough/hybrid_index_with_updates.js
@@ -5,130 +5,130 @@
* @tags: [requires_document_locking]
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- let conn = MongoRunner.runMongod();
- let testDB = conn.getDB('test');
-
- let turnFailPointOn = function(failPointName, data) {
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: failPointName, mode: "alwaysOn", data: data || {}}));
- };
-
- let turnFailPointOff = function(failPointName) {
- assert.commandWorked(testDB.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- };
-
- let totalDocs = 0;
- let crudOpsForPhase = function(coll, phase) {
- let bulk = coll.initializeUnorderedBulkOp();
-
- // Create 1000 documents in a specific range for this phase.
- for (let i = 0; i < 1000; i++) {
- bulk.insert({i: (phase * 1000) + i});
- }
- totalDocs += 1000;
-
- if (phase <= 0) {
- assert.commandWorked(bulk.execute());
- return;
- }
-
- // Update 50 documents.
- // For example, if phase is 2, documents [100, 150) will be updated to [-100, -150).
- let start = (phase - 1) * 100;
- for (let j = start; j < (100 * phase) - 50; j++) {
- bulk.find({i: j}).update({$set: {i: -j}});
- }
- // Delete 25 documents.
- // Similarly, if phase is 2, documents [150, 200) will be removed.
- for (let j = start + 50; j < 100 * phase; j++) {
- bulk.find({i: j}).remove();
- }
- totalDocs -= 50;
+"use strict";
+load("jstests/libs/check_log.js");
+
+let conn = MongoRunner.runMongod();
+let testDB = conn.getDB('test');
+
+let turnFailPointOn = function(failPointName, data) {
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: failPointName, mode: "alwaysOn", data: data || {}}));
+};
+
+let turnFailPointOff = function(failPointName) {
+ assert.commandWorked(testDB.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+};
+
+let totalDocs = 0;
+let crudOpsForPhase = function(coll, phase) {
+ let bulk = coll.initializeUnorderedBulkOp();
+
+ // Create 1000 documents in a specific range for this phase.
+ for (let i = 0; i < 1000; i++) {
+ bulk.insert({i: (phase * 1000) + i});
+ }
+ totalDocs += 1000;
+
+ if (phase <= 0) {
assert.commandWorked(bulk.execute());
- };
+ return;
+ }
+
+ // Update 50 documents.
+ // For example, if phase is 2, documents [100, 150) will be updated to [-100, -150).
+ let start = (phase - 1) * 100;
+ for (let j = start; j < (100 * phase) - 50; j++) {
+ bulk.find({i: j}).update({$set: {i: -j}});
+ }
+    // Delete 50 documents.
+ // Similarly, if phase is 2, documents [150, 200) will be removed.
+ for (let j = start + 50; j < 100 * phase; j++) {
+ bulk.find({i: j}).remove();
+ }
+ totalDocs -= 50;
+
+ assert.commandWorked(bulk.execute());
+};
- crudOpsForPhase(testDB.hybrid, 0);
- assert.eq(totalDocs, testDB.hybrid.count());
+crudOpsForPhase(testDB.hybrid, 0);
+assert.eq(totalDocs, testDB.hybrid.count());
- // Hang the build after the first document.
- let stopKey = {'i': 1};
- turnFailPointOn("hangBeforeIndexBuildOf", stopKey);
+// Hang the build after the first document.
+let stopKey = {'i': 1};
+turnFailPointOn("hangBeforeIndexBuildOf", stopKey);
- // Start the background build.
- let bgBuild = startParallelShell(function() {
- assert.commandWorked(db.hybrid.createIndex({i: 1}, {background: true}));
- }, conn.port);
+// Start the background build.
+let bgBuild = startParallelShell(function() {
+ assert.commandWorked(db.hybrid.createIndex({i: 1}, {background: true}));
+}, conn.port);
- checkLog.contains(conn, "Hanging before index build of i=1");
+checkLog.contains(conn, "Hanging before index build of i=1");
- // Phase 1: Collection scan and external sort
- // Insert documents while doing the bulk build.
- crudOpsForPhase(testDB.hybrid, 1);
- assert.eq(totalDocs, testDB.hybrid.count());
+// Phase 1: Collection scan and external sort
+// Insert documents while doing the bulk build.
+crudOpsForPhase(testDB.hybrid, 1);
+assert.eq(totalDocs, testDB.hybrid.count());
- // Enable pause after bulk dump into index.
- turnFailPointOn("hangAfterIndexBuildDumpsInsertsFromBulk");
+// Enable pause after bulk dump into index.
+turnFailPointOn("hangAfterIndexBuildDumpsInsertsFromBulk");
- // Wait for the bulk insert to complete.
- turnFailPointOff("hangBeforeIndexBuildOf");
- checkLog.contains(conn, "Hanging after dumping inserts from bulk builder");
+// Wait for the bulk insert to complete.
+turnFailPointOff("hangBeforeIndexBuildOf");
+checkLog.contains(conn, "Hanging after dumping inserts from bulk builder");
- // Phase 2: First drain
- // Do some updates, inserts and deletes after the bulk builder has finished.
+// Phase 2: First drain
+// Do some updates, inserts and deletes after the bulk builder has finished.
- // Hang after yielding
- turnFailPointOn("hangDuringIndexBuildDrainYield", {namespace: testDB.hybrid.getFullName()});
+// Hang after yielding
+turnFailPointOn("hangDuringIndexBuildDrainYield", {namespace: testDB.hybrid.getFullName()});
- // Enable pause after first drain.
- turnFailPointOn("hangAfterIndexBuildFirstDrain");
+// Enable pause after first drain.
+turnFailPointOn("hangAfterIndexBuildFirstDrain");
- crudOpsForPhase(testDB.hybrid, 2);
- assert.eq(totalDocs, testDB.hybrid.count());
+crudOpsForPhase(testDB.hybrid, 2);
+assert.eq(totalDocs, testDB.hybrid.count());
- // Allow first drain to start.
- turnFailPointOff("hangAfterIndexBuildDumpsInsertsFromBulk");
+// Allow first drain to start.
+turnFailPointOff("hangAfterIndexBuildDumpsInsertsFromBulk");
- // Ensure the operation yields during the drain, then attempt some operations.
- checkLog.contains(conn, "Hanging index build during drain yield");
- assert.commandWorked(testDB.hybrid.insert({i: "during yield"}));
- assert.commandWorked(testDB.hybrid.remove({i: "during yield"}));
- turnFailPointOff("hangDuringIndexBuildDrainYield");
+// Ensure the operation yields during the drain, then attempt some operations.
+checkLog.contains(conn, "Hanging index build during drain yield");
+assert.commandWorked(testDB.hybrid.insert({i: "during yield"}));
+assert.commandWorked(testDB.hybrid.remove({i: "during yield"}));
+turnFailPointOff("hangDuringIndexBuildDrainYield");
- // Wait for first drain to finish.
- checkLog.contains(conn, "Hanging after index build first drain");
+// Wait for first drain to finish.
+checkLog.contains(conn, "Hanging after index build first drain");
- // Phase 3: Second drain
- // Enable pause after second drain.
- turnFailPointOn("hangAfterIndexBuildSecondDrain");
+// Phase 3: Second drain
+// Enable pause after second drain.
+turnFailPointOn("hangAfterIndexBuildSecondDrain");
- // Add inserts that must be consumed in the second drain.
- crudOpsForPhase(testDB.hybrid, 3);
- assert.eq(totalDocs, testDB.hybrid.count());
+// Add inserts that must be consumed in the second drain.
+crudOpsForPhase(testDB.hybrid, 3);
+assert.eq(totalDocs, testDB.hybrid.count());
- // Allow second drain to start.
- turnFailPointOff("hangAfterIndexBuildFirstDrain");
+// Allow second drain to start.
+turnFailPointOff("hangAfterIndexBuildFirstDrain");
- // Wait for second drain to finish.
- checkLog.contains(conn, "Hanging after index build second drain");
+// Wait for second drain to finish.
+checkLog.contains(conn, "Hanging after index build second drain");
- // Phase 4: Final drain and commit.
- // Add inserts that must be consumed in the final drain.
- crudOpsForPhase(testDB.hybrid, 4);
- assert.eq(totalDocs, testDB.hybrid.count());
+// Phase 4: Final drain and commit.
+// Add inserts that must be consumed in the final drain.
+crudOpsForPhase(testDB.hybrid, 4);
+assert.eq(totalDocs, testDB.hybrid.count());
- // Allow final drain to start.
- turnFailPointOff("hangAfterIndexBuildSecondDrain");
+// Allow final drain to start.
+turnFailPointOff("hangAfterIndexBuildSecondDrain");
- // Wait for build to complete.
- bgBuild();
+// Wait for build to complete.
+bgBuild();
- assert.eq(totalDocs, testDB.hybrid.count());
- assert.commandWorked(testDB.hybrid.validate({full: true}));
+assert.eq(totalDocs, testDB.hybrid.count());
+assert.commandWorked(testDB.hybrid.validate({full: true}));
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/hybrid_partial_geo_index.js b/jstests/noPassthrough/hybrid_partial_geo_index.js
index 8e204647cdf..7418c489eea 100644
--- a/jstests/noPassthrough/hybrid_partial_geo_index.js
+++ b/jstests/noPassthrough/hybrid_partial_geo_index.js
@@ -4,68 +4,70 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.commandWorked(testDB.createCollection(coll.getName()));
+assert.commandWorked(testDB.createCollection(coll.getName()));
- IndexBuildTest.pauseIndexBuilds(primary);
+IndexBuildTest.pauseIndexBuilds(primary);
- // Create a 2dsphere partial index for documents where 'a', the field in the filter expression,
- // is greater than 0.
- const partialIndex = {b: '2dsphere'};
- const createIdx = IndexBuildTest.startIndexBuild(
- primary, coll.getFullName(), partialIndex, {partialFilterExpression: {a: {$gt: 0}}});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+// Create a 2dsphere partial index for documents where 'a', the field in the filter expression,
+// is greater than 0.
+const partialIndex = {
+ b: '2dsphere'
+};
+const createIdx = IndexBuildTest.startIndexBuild(
+ primary, coll.getFullName(), partialIndex, {partialFilterExpression: {a: {$gt: 0}}});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- // This document has an invalid geoJSON format (duplicated points), but will not be indexed.
- const unindexedDoc = {
- _id: 0,
- a: -1,
- b: {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 1], [0, 1], [0, 0]]]},
- };
+// This document has an invalid geoJSON format (duplicated points), but will not be indexed.
+const unindexedDoc = {
+ _id: 0,
+ a: -1,
+ b: {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 1], [0, 1], [0, 0]]]},
+};
- // This document has valid geoJson, and will be indexed.
- const indexedDoc = {
- _id: 1,
- a: 1,
- b: {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]},
- };
+// This document has valid geoJSON and will be indexed.
+const indexedDoc = {
+ _id: 1,
+ a: 1,
+ b: {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]},
+};
- assert.commandWorked(coll.insert(unindexedDoc));
- assert.commandWorked(coll.insert(indexedDoc));
+assert.commandWorked(coll.insert(unindexedDoc));
+assert.commandWorked(coll.insert(indexedDoc));
- // Removing unindexed document should succeed without error.
- assert.commandWorked(coll.remove({_id: 0}));
+// Removing the unindexed document should succeed without error.
+assert.commandWorked(coll.remove({_id: 0}));
- IndexBuildTest.resumeIndexBuilds(primary);
+IndexBuildTest.resumeIndexBuilds(primary);
- // Wait for the index build to finish.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
+// Wait for the index build to finish.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'b_2dsphere']);
- let res = assert.commandWorked(coll.validate({full: true}));
- assert(res.valid, 'validation failed on primary: ' + tojson(res));
+let res = assert.commandWorked(coll.validate({full: true}));
+assert(res.valid, 'validation failed on primary: ' + tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/hybrid_partial_index_update.js b/jstests/noPassthrough/hybrid_partial_index_update.js
index 79d9f9cb48e..878cd334ce4 100644
--- a/jstests/noPassthrough/hybrid_partial_index_update.js
+++ b/jstests/noPassthrough/hybrid_partial_index_update.js
@@ -4,53 +4,55 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- 'use strict';
-
- load('jstests/noPassthrough/libs/index_build.js');
-
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+'use strict';
+
+load('jstests/noPassthrough/libs/index_build.js');
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.commandWorked(testDB.createCollection(coll.getName()));
+assert.commandWorked(testDB.createCollection(coll.getName()));
- IndexBuildTest.pauseIndexBuilds(primary);
+IndexBuildTest.pauseIndexBuilds(primary);
- // Create a partial index for documents where 'a', the field in the filter expression,
- // is equal to 1.
- const partialIndex = {a: 1};
- const createIdx = IndexBuildTest.startIndexBuild(
- primary, coll.getFullName(), partialIndex, {partialFilterExpression: {a: 1}});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+// Create a partial index for documents where 'a', the field in the filter expression,
+// is equal to 1.
+const partialIndex = {
+ a: 1
+};
+const createIdx = IndexBuildTest.startIndexBuild(
+ primary, coll.getFullName(), partialIndex, {partialFilterExpression: {a: 1}});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- assert.commandWorked(coll.insert({_id: 0, a: 1}));
+assert.commandWorked(coll.insert({_id: 0, a: 1}));
- // Update the document so that it no longer meets the partial index criteria.
- assert.commandWorked(coll.update({_id: 0}, {$set: {a: 0}}));
+// Update the document so that it no longer meets the partial index criteria.
+assert.commandWorked(coll.update({_id: 0}, {$set: {a: 0}}));
- IndexBuildTest.resumeIndexBuilds(primary);
+IndexBuildTest.resumeIndexBuilds(primary);
- // Wait for the index build to finish.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
+// Wait for the index build to finish.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
- let res = assert.commandWorked(coll.validate({full: true}));
- assert(res.valid, 'validation failed on primary: ' + tojson(res));
+let res = assert.commandWorked(coll.validate({full: true}));
+assert(res.valid, 'validation failed on primary: ' + tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/hybrid_sparse_compound_geo_index.js b/jstests/noPassthrough/hybrid_sparse_compound_geo_index.js
index a6f50f70151..e9a74de9982 100644
--- a/jstests/noPassthrough/hybrid_sparse_compound_geo_index.js
+++ b/jstests/noPassthrough/hybrid_sparse_compound_geo_index.js
@@ -5,47 +5,47 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- 'use strict';
-
- load('jstests/noPassthrough/libs/index_build.js');
-
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+'use strict';
+
+load('jstests/noPassthrough/libs/index_build.js');
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.commandWorked(testDB.createCollection(coll.getName()));
+assert.commandWorked(testDB.createCollection(coll.getName()));
- IndexBuildTest.pauseIndexBuilds(primary);
+IndexBuildTest.pauseIndexBuilds(primary);
- const createIdx = IndexBuildTest.startIndexBuild(
- primary, coll.getFullName(), {a: 1, b: '2dsphere'}, {sparse: true});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+const createIdx = IndexBuildTest.startIndexBuild(
+ primary, coll.getFullName(), {a: 1, b: '2dsphere'}, {sparse: true});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- assert.commandWorked(coll.insert({a: [1, 2]}));
+assert.commandWorked(coll.insert({a: [1, 2]}));
- IndexBuildTest.resumeIndexBuilds(primary);
+IndexBuildTest.resumeIndexBuilds(primary);
- // Wait for the index build to finish.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1_b_2dsphere']);
+// Wait for the index build to finish.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1_b_2dsphere']);
- let res = assert.commandWorked(coll.validate({full: true}));
- assert(res.valid, 'validation failed on primary: ' + tojson(res));
+let res = assert.commandWorked(coll.validate({full: true}));
+assert(res.valid, 'validation failed on primary: ' + tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/hybrid_unique_index_with_updates.js b/jstests/noPassthrough/hybrid_unique_index_with_updates.js
index 849e155b7e7..38a83b30a37 100644
--- a/jstests/noPassthrough/hybrid_unique_index_with_updates.js
+++ b/jstests/noPassthrough/hybrid_unique_index_with_updates.js
@@ -6,178 +6,177 @@
* @tags: [requires_document_locking, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- let replSetTest = new ReplSetTest({name: "hybrid_updates", nodes: 2});
- replSetTest.startSet();
- replSetTest.initiate();
+let replSetTest = new ReplSetTest({name: "hybrid_updates", nodes: 2});
+replSetTest.startSet();
+replSetTest.initiate();
- let conn = replSetTest.getPrimary();
- let testDB = conn.getDB('test');
+let conn = replSetTest.getPrimary();
+let testDB = conn.getDB('test');
- // Enables a failpoint, runs 'hitFailpointFunc' to hit the failpoint, then runs
- // 'duringFailpointFunc' while the failpoint is active.
- let doDuringFailpoint = function(
- failPointName, logMessage, hitFailpointFunc, duringFailpointFunc, i) {
- clearRawMongoProgramOutput();
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: failPointName, mode: "alwaysOn", data: {"i": i}}));
+// Enables a failpoint, runs 'hitFailpointFunc' to hit the failpoint, then runs
+// 'duringFailpointFunc' while the failpoint is active.
+let doDuringFailpoint = function(
+ failPointName, logMessage, hitFailpointFunc, duringFailpointFunc, i) {
+ clearRawMongoProgramOutput();
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn", data: {"i": i}}));
- hitFailpointFunc();
+ hitFailpointFunc();
- assert.soon(() => rawMongoProgramOutput().indexOf(logMessage) >= 0);
+ assert.soon(() => rawMongoProgramOutput().indexOf(logMessage) >= 0);
- duringFailpointFunc();
+ duringFailpointFunc();
- assert.commandWorked(testDB.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- };
+ assert.commandWorked(testDB.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+};
- const docsToInsert = 1000;
- let setUp = function(coll) {
- coll.drop();
+const docsToInsert = 1000;
+let setUp = function(coll) {
+ coll.drop();
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < docsToInsert; i++) {
- bulk.insert({i: i});
- }
- assert.commandWorked(bulk.execute());
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < docsToInsert; i++) {
+ bulk.insert({i: i});
+ }
+ assert.commandWorked(bulk.execute());
+};
+
+let buildIndexInBackground = function(coll, expectDuplicateKeyError) {
+ const createIndexFunction = function(collFullName) {
+ const coll = db.getMongo().getCollection(collFullName);
+ return coll.createIndex({i: 1}, {background: true, unique: true});
};
+ const assertFunction = expectDuplicateKeyError ? function(collFullName) {
+ assert.commandFailedWithCode(createIndexFunction(collFullName), ErrorCodes.DuplicateKey);
+ } : function(collFullName) {
+ assert.commandWorked(createIndexFunction(collFullName));
+ };
+ return startParallelShell('const createIndexFunction = ' + createIndexFunction + ';\n' +
+ 'const assertFunction = ' + assertFunction + ';\n' +
+ 'assertFunction("' + coll.getFullName() + '")',
+ conn.port);
+};
- let buildIndexInBackground = function(coll, expectDuplicateKeyError) {
- const createIndexFunction = function(collFullName) {
- const coll = db.getMongo().getCollection(collFullName);
- return coll.createIndex({i: 1}, {background: true, unique: true});
- };
- const assertFunction = expectDuplicateKeyError ? function(collFullName) {
- assert.commandFailedWithCode(createIndexFunction(collFullName),
- ErrorCodes.DuplicateKey);
- } : function(collFullName) {
- assert.commandWorked(createIndexFunction(collFullName));
- };
- return startParallelShell('const createIndexFunction = ' + createIndexFunction + ';\n' +
- 'const assertFunction = ' + assertFunction + ';\n' +
- 'assertFunction("' + coll.getFullName() + '")',
- conn.port);
+/**
+ * Run a background index build on a unique index under different configurations. Introduce
+ * duplicate keys on the index that may cause it to fail or succeed, depending on the following
+ * optional parameters:
+ * {
+ * // Which operation is used to introduce a duplicate key.
+ * operation {string}: "insert", "update"
+ *
+ * // Whether or not to resolve the duplicate key before completing the build.
+ * resolve {bool}
+ *
+ * // Which phase of the index build to introduce the duplicate key in.
+ * phase {number}: 0-4
+ * }
+ */
+let runTest = function(config) {
+ jsTestLog("running test with config: " + tojson(config));
+
+ const collName = Object.keys(config).length
+ ? 'hybrid_' + config.operation[0] + '_r' + Number(config.resolve) + '_p' + config.phase
+ : 'hybrid';
+ const coll = testDB.getCollection(collName);
+ setUp(coll);
+
+ // Expect the build to fail with a duplicate key error if we insert a duplicate key and
+ // don't resolve it.
+ let expectDuplicate = config.resolve === false;
+
+ let awaitBuild;
+ let buildIndex = function() {
+ awaitBuild = buildIndexInBackground(coll, expectDuplicate);
};
- /**
- * Run a background index build on a unique index under different configurations. Introduce
- * duplicate keys on the index that may cause it to fail or succeed, depending on the following
- * optional parameters:
- * {
- * // Which operation used to introduce a duplicate key.
- * operation {string}: "insert", "update"
- *
- * // Whether or not resolve the duplicate key before completing the build.
- * resolve {bool}
- *
- * // Which phase of the index build to introduce the duplicate key.
- * phase {number}: 0-4
- * }
- */
- let runTest = function(config) {
- jsTestLog("running test with config: " + tojson(config));
-
- const collName = Object.keys(config).length
- ? 'hybrid_' + config.operation[0] + '_r' + Number(config.resolve) + '_p' + config.phase
- : 'hybrid';
- const coll = testDB.getCollection(collName);
- setUp(coll);
-
- // Expect the build to fail with a duplicate key error if we insert a duplicate key and
- // don't resolve it.
- let expectDuplicate = config.resolve === false;
-
- let awaitBuild;
- let buildIndex = function() {
- awaitBuild = buildIndexInBackground(coll, expectDuplicate);
- };
-
- // Introduce a duplicate key, either from an insert or update. Optionally, follow-up with an
- // operation that will resolve the duplicate by removing it or updating it.
- const dup = {i: 0};
- let doOperation = function() {
- if ("insert" == config.operation) {
- assert.commandWorked(coll.insert(dup));
- if (config.resolve) {
- assert.commandWorked(coll.deleteOne(dup));
- }
- } else if ("update" == config.operation) {
- assert.commandWorked(coll.update(dup, {i: 1}));
- if (config.resolve) {
- assert.commandWorked(coll.update({i: 1}, dup));
- }
+ // Introduce a duplicate key, either from an insert or update. Optionally, follow up with an
+ // operation that will resolve the duplicate by removing it or updating it.
+ const dup = {i: 0};
+ let doOperation = function() {
+ if ("insert" == config.operation) {
+ assert.commandWorked(coll.insert(dup));
+ if (config.resolve) {
+ assert.commandWorked(coll.deleteOne(dup));
+ }
+ } else if ("update" == config.operation) {
+ assert.commandWorked(coll.update(dup, {i: 1}));
+ if (config.resolve) {
+ assert.commandWorked(coll.update({i: 1}, dup));
}
- };
-
- const stopKey = 0;
- switch (config.phase) {
- // Just build the index without any failpoints.
- case undefined:
- buildIndex();
- break;
- // Hang before scanning the first document.
- case 0:
- doDuringFailpoint("hangBeforeIndexBuildOf",
- "Hanging before index build of i=" + stopKey,
- buildIndex,
- doOperation,
- stopKey);
- break;
- // Hang after scanning the first document.
- case 1:
- doDuringFailpoint("hangAfterIndexBuildOf",
- "Hanging after index build of i=" + stopKey,
- buildIndex,
- doOperation,
- stopKey);
- break;
- // Hang before the first drain and after dumping the keys from the external sorter into
- // the index.
- case 2:
- doDuringFailpoint("hangAfterIndexBuildDumpsInsertsFromBulk",
- "Hanging after dumping inserts from bulk builder",
- buildIndex,
- doOperation);
- break;
- // Hang before the second drain.
- case 3:
- doDuringFailpoint("hangAfterIndexBuildFirstDrain",
- "Hanging after index build first drain",
- buildIndex,
- doOperation);
- break;
- // Hang before the final drain and commit.
- case 4:
- doDuringFailpoint("hangAfterIndexBuildSecondDrain",
- "Hanging after index build second drain",
- buildIndex,
- doOperation);
- break;
- default:
- assert(false, "Invalid phase: " + config.phase);
}
+ };
- awaitBuild();
+ const stopKey = 0;
+ switch (config.phase) {
+ // Just build the index without any failpoints.
+ case undefined:
+ buildIndex();
+ break;
+ // Hang before scanning the first document.
+ case 0:
+ doDuringFailpoint("hangBeforeIndexBuildOf",
+ "Hanging before index build of i=" + stopKey,
+ buildIndex,
+ doOperation,
+ stopKey);
+ break;
+ // Hang after scanning the first document.
+ case 1:
+ doDuringFailpoint("hangAfterIndexBuildOf",
+ "Hanging after index build of i=" + stopKey,
+ buildIndex,
+ doOperation,
+ stopKey);
+ break;
+ // Hang before the first drain and after dumping the keys from the external sorter into
+ // the index.
+ case 2:
+ doDuringFailpoint("hangAfterIndexBuildDumpsInsertsFromBulk",
+ "Hanging after dumping inserts from bulk builder",
+ buildIndex,
+ doOperation);
+ break;
+ // Hang before the second drain.
+ case 3:
+ doDuringFailpoint("hangAfterIndexBuildFirstDrain",
+ "Hanging after index build first drain",
+ buildIndex,
+ doOperation);
+ break;
+ // Hang before the final drain and commit.
+ case 4:
+ doDuringFailpoint("hangAfterIndexBuildSecondDrain",
+ "Hanging after index build second drain",
+ buildIndex,
+ doOperation);
+ break;
+ default:
+ assert(false, "Invalid phase: " + config.phase);
+ }
- let expectedDocs = docsToInsert;
- expectedDocs += (config.operation == "insert" && config.resolve === false) ? 1 : 0;
+ awaitBuild();
- assert.eq(expectedDocs, coll.count());
- assert.eq(expectedDocs, coll.find().itcount());
- assert.commandWorked(coll.validate({full: true}));
- };
+ let expectedDocs = docsToInsert;
+ expectedDocs += (config.operation == "insert" && config.resolve === false) ? 1 : 0;
- runTest({});
+ assert.eq(expectedDocs, coll.count());
+ assert.eq(expectedDocs, coll.find().itcount());
+ assert.commandWorked(coll.validate({full: true}));
+};
- for (let i = 0; i <= 4; i++) {
- runTest({operation: "insert", resolve: true, phase: i});
- runTest({operation: "insert", resolve: false, phase: i});
- runTest({operation: "update", resolve: true, phase: i});
- runTest({operation: "update", resolve: false, phase: i});
- }
+runTest({});
+
+for (let i = 0; i <= 4; i++) {
+ runTest({operation: "insert", resolve: true, phase: i});
+ runTest({operation: "insert", resolve: false, phase: i});
+ runTest({operation: "update", resolve: true, phase: i});
+ runTest({operation: "update", resolve: false, phase: i});
+}
- replSetTest.stopSet();
+replSetTest.stopSet();
})();
diff --git a/jstests/noPassthrough/hyphenated_database_name.js b/jstests/noPassthrough/hyphenated_database_name.js
index 6387e8f167f..0290e4444d9 100644
--- a/jstests/noPassthrough/hyphenated_database_name.js
+++ b/jstests/noPassthrough/hyphenated_database_name.js
@@ -4,21 +4,21 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
- var isDirectoryPerDBSupported = jsTest.options().storageEngine == "wiredTiger" ||
- jsTest.options().storageEngine == "mmapv1" || !jsTest.options().storageEngine;
- if (!isDirectoryPerDBSupported)
- return;
+"use strict";
+var isDirectoryPerDBSupported = jsTest.options().storageEngine == "wiredTiger" ||
+ jsTest.options().storageEngine == "mmapv1" || !jsTest.options().storageEngine;
+if (!isDirectoryPerDBSupported)
+ return;
- const dbName = "test-hyphen";
- let conn = MongoRunner.runMongod({directoryperdb: ''});
+const dbName = "test-hyphen";
+let conn = MongoRunner.runMongod({directoryperdb: ''});
- conn.getDB(dbName).a.insert({x: 1});
- let res = conn.getDB(dbName).runCommand({dbStats: 1, scale: 1});
- jsTestLog("dbStats: " + tojson(res));
- assert(res.db == "test-hyphen");
- assert(res.fsUsedSize > 0);
- assert(res.fsTotalSize > 0);
+conn.getDB(dbName).a.insert({x: 1});
+let res = conn.getDB(dbName).runCommand({dbStats: 1, scale: 1});
+jsTestLog("dbStats: " + tojson(res));
+assert(res.db == "test-hyphen");
+assert(res.fsUsedSize > 0);
+assert(res.fsTotalSize > 0);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/ignore_notablescan.js b/jstests/noPassthrough/ignore_notablescan.js
index ccdfa9ebfce..255b646f757 100644
--- a/jstests/noPassthrough/ignore_notablescan.js
+++ b/jstests/noPassthrough/ignore_notablescan.js
@@ -1,73 +1,71 @@
// Test that the 'notablescan' parameter does not affect queries against internal namespaces.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- function runTests(ServerType) {
- const s = new ServerType();
+function runTests(ServerType) {
+ const s = new ServerType();
- const configDB = s.getConn().getDB("config");
- const session = s.getConn().getDB(dbName).getMongo().startSession();
- const primaryDB = session.getDatabase(dbName);
+ const configDB = s.getConn().getDB("config");
+ const session = s.getConn().getDB(dbName).getMongo().startSession();
+ const primaryDB = session.getDatabase(dbName);
- // Implicitly create the collection outside of the transaction.
- assert.writeOK(primaryDB.getCollection(collName).insert({x: 1}));
+ // Implicitly create the collection outside of the transaction.
+ assert.writeOK(primaryDB.getCollection(collName).insert({x: 1}));
- // Run a transaction so the 'config.transactions' collection is implicitly created.
- session.startTransaction();
- assert.writeOK(primaryDB.getCollection(collName).insert({x: 2}));
- assert.commandWorked(session.commitTransaction_forTesting());
+ // Run a transaction so the 'config.transactions' collection is implicitly created.
+ session.startTransaction();
+ assert.writeOK(primaryDB.getCollection(collName).insert({x: 2}));
+ assert.commandWorked(session.commitTransaction_forTesting());
- // Run a predicate query that would fail if we did not ignore the 'notablescan' flag.
- assert.eq(configDB.transactions.find({any_nonexistent_field: {$exists: true}}).itcount(),
- 0);
+ // Run a predicate query that would fail if we did not ignore the 'notablescan' flag.
+ assert.eq(configDB.transactions.find({any_nonexistent_field: {$exists: true}}).itcount(), 0);
- // Run the same query against the user created collection honoring the 'notablescan' flag.
- // This will cause the query to fail as there is no viable query plan. Unfortunately,
- // the reported query error code is the cryptic 'BadValue'.
- assert.commandFailedWithCode(
- primaryDB.runCommand(
- {find: collName, filter: {any_nonexistent_field: {$exists: true}}}),
- ErrorCodes.BadValue);
+ // Run the same query against the user-created collection honoring the 'notablescan' flag.
+ // This will cause the query to fail as there is no viable query plan. Unfortunately,
+ // the reported query error code is the cryptic 'BadValue'.
+ assert.commandFailedWithCode(
+ primaryDB.runCommand({find: collName, filter: {any_nonexistent_field: {$exists: true}}}),
+ ErrorCodes.BadValue);
- s.stop();
- }
+ s.stop();
+}
- function Sharding() {
- this.st = new ShardingTest({
- shards: 2,
- config: 1,
- other: {
- shardOptions: {setParameter: {notablescan: true}},
- configOptions: {setParameter: {notablescan: true}}
- }
- });
- }
+function Sharding() {
+ this.st = new ShardingTest({
+ shards: 2,
+ config: 1,
+ other: {
+ shardOptions: {setParameter: {notablescan: true}},
+ configOptions: {setParameter: {notablescan: true}}
+ }
+ });
+}
- Sharding.prototype.stop = function() {
- this.st.stop();
- };
+Sharding.prototype.stop = function() {
+ this.st.stop();
+};
- Sharding.prototype.getConn = function() {
- return this.st.s0;
- };
+Sharding.prototype.getConn = function() {
+ return this.st.s0;
+};
- function ReplSet() {
- this.rst = new ReplSetTest({nodes: 1, nodeOptions: {setParameter: {notablescan: true}}});
- this.rst.startSet();
- this.rst.initiate();
- }
+function ReplSet() {
+ this.rst = new ReplSetTest({nodes: 1, nodeOptions: {setParameter: {notablescan: true}}});
+ this.rst.startSet();
+ this.rst.initiate();
+}
- ReplSet.prototype.stop = function() {
- this.rst.stopSet();
- };
+ReplSet.prototype.stop = function() {
+ this.rst.stopSet();
+};
- ReplSet.prototype.getConn = function() {
- return this.rst.getPrimary();
- };
+ReplSet.prototype.getConn = function() {
+ return this.rst.getPrimary();
+};
- [ReplSet, Sharding].forEach(runTests);
+[ReplSet, Sharding].forEach(runTests);
}());
diff --git a/jstests/noPassthrough/implicit_sessions.js b/jstests/noPassthrough/implicit_sessions.js
index 77204e098e9..f0bb9d972f9 100644
--- a/jstests/noPassthrough/implicit_sessions.js
+++ b/jstests/noPassthrough/implicit_sessions.js
@@ -2,245 +2,244 @@
* Verifies behavior around implicit sessions in the mongo shell.
*/
(function() {
- "use strict";
-
- /**
- * Runs the given function, inspecting the outgoing command object and making assertions about
- * its logical session id.
- */
- function inspectCommandForSessionId(func, {shouldIncludeId, expectedId, differentFromId}) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
-
- const sentinel = {};
- let cmdObjSeen = sentinel;
-
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
-
- try {
- assert.doesNotThrow(func);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
+"use strict";
- if (cmdObjSeen === sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
- }
+/**
+ * Runs the given function, inspecting the outgoing command object and making assertions about
+ * its logical session id.
+ */
+function inspectCommandForSessionId(func, {shouldIncludeId, expectedId, differentFromId}) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- let cmdName = Object.keys(cmdObjSeen)[0];
- if (cmdName === "query" || cmdName === "$query") {
- cmdObjSeen = cmdObjSeen[cmdName];
- cmdName = Object.keys(cmdObjSeen)[0];
- }
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
- if (shouldIncludeId) {
- assert(cmdObjSeen.hasOwnProperty("lsid"),
- "Expected operation " + tojson(cmdObjSeen) + " to have a logical session id.");
-
- if (expectedId) {
- assert(bsonBinaryEqual(expectedId, cmdObjSeen.lsid),
- "The sent session id did not match the expected, sent: " +
- tojson(cmdObjSeen.lsid) + ", expected: " + tojson(expectedId));
- }
-
- if (differentFromId) {
- assert(!bsonBinaryEqual(differentFromId, cmdObjSeen.lsid),
- "The sent session id was not different from the expected, sent: " +
- tojson(cmdObjSeen.lsid) + ", expected: " + tojson(differentFromId));
- }
-
- } else {
- assert(
- !cmdObjSeen.hasOwnProperty("lsid"),
- "Expected operation " + tojson(cmdObjSeen) + " to not have a logical session id.");
- }
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- return cmdObjSeen.lsid;
+ try {
+ assert.doesNotThrow(func);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
}
- // Tests regular behavior of implicit sessions.
- function runTest() {
- const conn = MongoRunner.runMongod();
-
- // Commands run on a database without an explicit session should use an implicit one.
- const testDB = conn.getDB("test");
- const coll = testDB.getCollection("foo");
- const implicitId = inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true});
-
- // Unacknowledged writes have no session id.
- inspectCommandForSessionId(function() {
- coll.insert({x: 1}, {writeConcern: {w: 0}});
- }, {shouldIncludeId: false});
+ if (cmdObjSeen === sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
+ }
- assert(bsonBinaryEqual(testDB.getSession().getSessionId(), implicitId),
- "Expected the id of the database's implicit session to match the one sent, sent: " +
- tojson(implicitId) + " db session id: " +
- tojson(testDB.getSession().getSessionId()));
-
- // Implicit sessions are not causally consistent.
- assert(!testDB.getSession().getOptions().isCausalConsistency(),
- "Expected the database's implicit session to not be causally consistent");
-
- // Further commands run on the same database should reuse the implicit session.
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // New collections from the same database should inherit the implicit session.
- const collTwo = testDB.getCollection("bar");
- inspectCommandForSessionId(function() {
- assert.writeOK(collTwo.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // Sibling databases should inherit the implicit session.
- let siblingColl = testDB.getSiblingDB("foo").getCollection("bar");
- inspectCommandForSessionId(function() {
- assert.writeOK(siblingColl.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // A new database from the same connection should inherit the implicit session.
- const newCollSameConn = conn.getDB("testTwo").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(newCollSameConn.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // A new database from a new connection should use a different implicit session.
- const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(newCollNewConn.insert({x: 1}));
- }, {shouldIncludeId: true, differentFromId: implicitId});
-
- // The original implicit session should still live on the first database.
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // Databases created from an explicit session should override any implicit sessions.
- const session = conn.startSession();
- const sessionColl = session.getDatabase("test").getCollection("foo");
- const explicitId = inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
- }, {shouldIncludeId: true, differentFromId: implicitId});
-
- assert(bsonBinaryEqual(session.getSessionId(), explicitId),
- "Expected the id of the explicit session to match the one sent, sent: " +
- tojson(explicitId) + " explicit session id: " + tojson(session.getSessionId()));
- assert(bsonBinaryEqual(sessionColl.getDB().getSession().getSessionId(), explicitId),
- "Expected id of the database's session to match the explicit session's id, sent: " +
- tojson(sessionColl.getDB().getSession().getSessionId()) +
- ", explicit session id: " + tojson(session.getSessionId()));
-
- // The original implicit session should still live on the first database.
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- // New databases on the same connection as the explicit session should still inherit the
- // original implicit session.
- const newCollSameConnAfter = conn.getDB("testThree").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(newCollSameConnAfter.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
-
- session.endSession();
- MongoRunner.stopMongod(conn);
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ let cmdName = Object.keys(cmdObjSeen)[0];
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObjSeen = cmdObjSeen[cmdName];
+ cmdName = Object.keys(cmdObjSeen)[0];
}
- // Tests behavior when the test flag to disable implicit sessions is changed.
- function runTestTransitionToDisabled() {
- const conn = MongoRunner.runMongod();
+ if (shouldIncludeId) {
+ assert(cmdObjSeen.hasOwnProperty("lsid"),
+ "Expected operation " + tojson(cmdObjSeen) + " to have a logical session id.");
- // Existing implicit sessions should be erased when the disable flag is set.
- const coll = conn.getDB("test").getCollection("foo");
- const implicitId = inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true});
+ if (expectedId) {
+ assert(bsonBinaryEqual(expectedId, cmdObjSeen.lsid),
+ "The sent session id did not match the expected, sent: " +
+ tojson(cmdObjSeen.lsid) + ", expected: " + tojson(expectedId));
+ }
- TestData.disableImplicitSessions = true;
+ if (differentFromId) {
+ assert(!bsonBinaryEqual(differentFromId, cmdObjSeen.lsid),
+ "The sent session id was not different from the expected, sent: " +
+ tojson(cmdObjSeen.lsid) + ", expected: " + tojson(differentFromId));
+ }
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: false});
+ } else {
+ assert(!cmdObjSeen.hasOwnProperty("lsid"),
+ "Expected operation " + tojson(cmdObjSeen) + " to not have a logical session id.");
+ }
- // After the flag is unset, databases using existing connections with implicit sessions will
- // use the original implicit sessions again and new connections will create and use new
- // implicit sessions.
- TestData.disableImplicitSessions = false;
+ return cmdObjSeen.lsid;
+}
+
+// Tests regular behavior of implicit sessions.
+function runTest() {
+ const conn = MongoRunner.runMongod();
+
+ // Commands run on a database without an explicit session should use an implicit one.
+ const testDB = conn.getDB("test");
+ const coll = testDB.getCollection("foo");
+ const implicitId = inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true});
+
+ // Unacknowledged writes have no session id.
+ inspectCommandForSessionId(function() {
+ coll.insert({x: 1}, {writeConcern: {w: 0}});
+ }, {shouldIncludeId: false});
+
+ assert(bsonBinaryEqual(testDB.getSession().getSessionId(), implicitId),
+ "Expected the id of the database's implicit session to match the one sent, sent: " +
+ tojson(implicitId) +
+ " db session id: " + tojson(testDB.getSession().getSessionId()));
+
+ // Implicit sessions are not causally consistent.
+ assert(!testDB.getSession().getOptions().isCausalConsistency(),
+ "Expected the database's implicit session to not be causally consistent");
+
+ // Further commands run on the same database should reuse the implicit session.
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // New collections from the same database should inherit the implicit session.
+ const collTwo = testDB.getCollection("bar");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(collTwo.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // Sibling databases should inherit the implicit session.
+ let siblingColl = testDB.getSiblingDB("foo").getCollection("bar");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(siblingColl.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // A new database from the same connection should inherit the implicit session.
+ const newCollSameConn = conn.getDB("testTwo").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(newCollSameConn.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // A new database from a new connection should use a different implicit session.
+ const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(newCollNewConn.insert({x: 1}));
+ }, {shouldIncludeId: true, differentFromId: implicitId});
+
+ // The original implicit session should still live on the first database.
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // Databases created from an explicit session should override any implicit sessions.
+ const session = conn.startSession();
+ const sessionColl = session.getDatabase("test").getCollection("foo");
+ const explicitId = inspectCommandForSessionId(function() {
+ assert.writeOK(sessionColl.insert({x: 1}));
+ }, {shouldIncludeId: true, differentFromId: implicitId});
+
+ assert(bsonBinaryEqual(session.getSessionId(), explicitId),
+ "Expected the id of the explicit session to match the one sent, sent: " +
+ tojson(explicitId) + " explicit session id: " + tojson(session.getSessionId()));
+ assert(bsonBinaryEqual(sessionColl.getDB().getSession().getSessionId(), explicitId),
+ "Expected id of the database's session to match the explicit session's id, sent: " +
+ tojson(sessionColl.getDB().getSession().getSessionId()) +
+ ", explicit session id: " + tojson(session.getSessionId()));
+
+ // The original implicit session should still live on the first database.
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ // New databases on the same connection as the explicit session should still inherit the
+ // original implicit session.
+ const newCollSameConnAfter = conn.getDB("testThree").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(newCollSameConnAfter.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
+
+ session.endSession();
+ MongoRunner.stopMongod(conn);
+}
+
+// Tests behavior when the test flag to disable implicit sessions is changed.
+function runTestTransitionToDisabled() {
+ const conn = MongoRunner.runMongod();
+
+ // Existing implicit sessions should be erased when the disable flag is set.
+ const coll = conn.getDB("test").getCollection("foo");
+ const implicitId = inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true});
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
+ TestData.disableImplicitSessions = true;
- const newColl = conn.getDB("test").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(newColl.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: implicitId});
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: false});
- const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(newCollNewConn.insert({x: 1}));
- }, {shouldIncludeId: true, differentFromId: implicitId});
+ // After the flag is unset, databases using existing connections with implicit sessions will
+ // use the original implicit sessions again and new connections will create and use new
+ // implicit sessions.
+ TestData.disableImplicitSessions = false;
- // Explicit sessions should not be affected by the disable flag being set.
- const session = conn.startSession();
- const sessionColl = session.getDatabase("test").getCollection("foo");
- const explicitId = inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
- }, {shouldIncludeId: true});
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
- TestData.disableImplicitSessions = true;
+ const newColl = conn.getDB("test").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(newColl.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: implicitId});
- inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
- }, {shouldIncludeId: true, expectedId: explicitId});
+ const newCollNewConn = new Mongo(conn.host).getDB("test").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(newCollNewConn.insert({x: 1}));
+ }, {shouldIncludeId: true, differentFromId: implicitId});
- session.endSession();
- MongoRunner.stopMongod(conn);
- }
+ // Explicit sessions should not be affected by the disable flag being set.
+ const session = conn.startSession();
+ const sessionColl = session.getDatabase("test").getCollection("foo");
+ const explicitId = inspectCommandForSessionId(function() {
+ assert.writeOK(sessionColl.insert({x: 1}));
+ }, {shouldIncludeId: true});
- // Tests behavior of implicit sessions when they are disabled via a test flag.
- function runTestDisabled() {
- const conn = MongoRunner.runMongod();
+ TestData.disableImplicitSessions = true;
- // Commands run without an explicit session should not use an implicit one.
- const coll = conn.getDB("test").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(coll.insert({x: 1}));
+ inspectCommandForSessionId(function() {
+ assert.writeOK(sessionColl.insert({x: 1}));
+ }, {shouldIncludeId: true, expectedId: explicitId});
+
+ session.endSession();
+ MongoRunner.stopMongod(conn);
+}
+
+// Tests behavior of implicit sessions when they are disabled via a test flag.
+function runTestDisabled() {
+ const conn = MongoRunner.runMongod();
+
+ // Commands run without an explicit session should not use an implicit one.
+ const coll = conn.getDB("test").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(coll.insert({x: 1}));
+ }, {shouldIncludeId: false});
+
+ // Explicit sessions should still include session ids.
+ const session = conn.startSession();
+ const sessionColl = session.getDatabase("test").getCollection("foo");
+ inspectCommandForSessionId(function() {
+ assert.writeOK(sessionColl.insert({x: 1}));
+ }, {shouldIncludeId: true});
+
+ // Commands run in a parallel shell inherit the disable flag.
+ TestData.inspectCommandForSessionId = inspectCommandForSessionId;
+ const awaitShell = startParallelShell(function() {
+ const parallelColl = db.getCollection("foo");
+ TestData.inspectCommandForSessionId(function() {
+ assert.writeOK(parallelColl.insert({x: 1}));
}, {shouldIncludeId: false});
+ }, conn.port);
+ awaitShell();
- // Explicit sessions should still include session ids.
- const session = conn.startSession();
- const sessionColl = session.getDatabase("test").getCollection("foo");
- inspectCommandForSessionId(function() {
- assert.writeOK(sessionColl.insert({x: 1}));
- }, {shouldIncludeId: true});
-
- // Commands run in a parallel shell inherit the disable flag.
- TestData.inspectCommandForSessionId = inspectCommandForSessionId;
- const awaitShell = startParallelShell(function() {
- const parallelColl = db.getCollection("foo");
- TestData.inspectCommandForSessionId(function() {
- assert.writeOK(parallelColl.insert({x: 1}));
- }, {shouldIncludeId: false});
- }, conn.port);
- awaitShell();
-
- session.endSession();
- MongoRunner.stopMongod(conn);
- }
+ session.endSession();
+ MongoRunner.stopMongod(conn);
+}
- runTest();
+runTest();
- runTestTransitionToDisabled();
+runTestTransitionToDisabled();
- assert(_shouldUseImplicitSessions());
+assert(_shouldUseImplicitSessions());
- TestData.disableImplicitSessions = true;
- runTestDisabled();
+TestData.disableImplicitSessions = true;
+runTestDisabled();
})();
diff --git a/jstests/noPassthrough/index_builds_ignore_prepare_conflicts.js b/jstests/noPassthrough/index_builds_ignore_prepare_conflicts.js
index 0875b4cec97..352541b890d 100644
--- a/jstests/noPassthrough/index_builds_ignore_prepare_conflicts.js
+++ b/jstests/noPassthrough/index_builds_ignore_prepare_conflicts.js
@@ -10,105 +10,104 @@
* ]
*/
(function() {
- "use strict";
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js"); // for PrepareHelpers
+load("jstests/noPassthrough/libs/index_build.js"); // for IndexBuildTest
+
+const replSetTest = new ReplSetTest({
+ name: "index_builds_ignore_prepare_conflicts",
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ ],
+});
+replSetTest.startSet();
+replSetTest.initiate();
+
+const primary = replSetTest.getPrimary();
+const primaryDB = primary.getDB('test');
+
+let numDocs = 10;
+let setUp = function(coll) {
+ coll.drop();
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; i++) {
+ bulk.insert({i: i});
+ }
+ assert.commandWorked(bulk.execute());
+};
- load("jstests/core/txns/libs/prepare_helpers.js"); // for PrepareHelpers
- load("jstests/noPassthrough/libs/index_build.js"); // for IndexBuildTest
-
- const replSetTest = new ReplSetTest({
- name: "index_builds_ignore_prepare_conflicts",
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- ],
+/**
+ * Run a background index build, and depending on the provided node, 'conn', ensure that a
+ * prepared update does not introduce prepare conflicts on the index builder.
+ */
+let runTest = function(conn) {
+ const testDB = conn.getDB('test');
+
+ const collName = 'index_builds_ignore_prepare_conflicts';
+ const coll = primaryDB.getCollection(collName);
+ setUp(coll);
+
+ // Start and pause an index build.
+ IndexBuildTest.pauseIndexBuilds(conn);
+ const awaitBuild = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {i: 1});
+ const opId = IndexBuildTest.waitForIndexBuildToStart(testDB, collName, "i_1");
+
+ // This insert will block until the index build pauses and releases its exclusive lock.
+ // This guarantees that the subsequent transaction can immediately acquire a lock and not
+ // fail with a LockTimeout error.
+ assert.commandWorked(coll.insert({i: numDocs++}));
+
+ // Start a session and introduce a document that is in a prepared state, but should be
+ // ignored by the index build, at least until the transaction commits.
+ const session = primaryDB.getMongo().startSession();
+ const sessionDB = session.getDatabase('test');
+ const sessionColl = sessionDB.getCollection(collName);
+ session.startTransaction();
+ assert.commandWorked(sessionColl.update({i: 0}, {i: "prepared"}));
+ // Use w:1 because the secondary will be unable to replicate the prepare while an index
+ // build is running.
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+
+ // Let the index build continue until just before it completes. Set the failpoint just
+ // before the second drain, which would take a lock that conflicts with the prepared
+ // transaction and prevent the index build from completing entirely.
+ const failPointName = "hangAfterIndexBuildFirstDrain";
+ clearRawMongoProgramOutput();
+ assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn"}));
+
+ // Unpause the index build from the first failpoint so that it can resume and pause at the
+ // next failpoint.
+ IndexBuildTest.resumeIndexBuilds(conn);
+ assert.soon(() =>
+ rawMongoProgramOutput().indexOf("Hanging after index build first drain") >= 0);
+
+ // Right before the index build completes, ensure no prepare conflicts were hit.
+ IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId, (op) => {
+ printjson(op);
+ assert.eq(undefined, op.prepareReadConflicts);
});
- replSetTest.startSet();
- replSetTest.initiate();
-
- const primary = replSetTest.getPrimary();
- const primaryDB = primary.getDB('test');
-
- let numDocs = 10;
- let setUp = function(coll) {
- coll.drop();
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({i: i});
- }
- assert.commandWorked(bulk.execute());
- };
-
- /**
- * Run a background index build, and depending on the provided node, 'conn', ensure that a
- * prepared update does not introduce prepare conflicts on the index builder.
- */
- let runTest = function(conn) {
- const testDB = conn.getDB('test');
-
- const collName = 'index_builds_ignore_prepare_conflicts';
- const coll = primaryDB.getCollection(collName);
- setUp(coll);
-
- // Start and pause an index build.
- IndexBuildTest.pauseIndexBuilds(conn);
- const awaitBuild = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {i: 1});
- const opId = IndexBuildTest.waitForIndexBuildToStart(testDB, collName, "i_1");
-
- // This insert will block until the index build pauses and releases its exclusive lock.
- // This guarantees that the subsequent transaction can immediately acquire a lock and not
- // fail with a LockTimeout error.
- assert.commandWorked(coll.insert({i: numDocs++}));
-
- // Start a session and introduce a document that is in a prepared state, but should be
- // ignored by the index build, at least until the transaction commits.
- const session = primaryDB.getMongo().startSession();
- const sessionDB = session.getDatabase('test');
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.update({i: 0}, {i: "prepared"}));
- // Use w:1 because the secondary will be unable to replicate the prepare while an index
- // build is running.
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
-
- // Let the index build continue until just before it completes. Set the failpoint just
- // before the second drain, which would take lock that conflicts with the prepared
- // transaction and prevent the index build from completing entirely.
- const failPointName = "hangAfterIndexBuildFirstDrain";
- clearRawMongoProgramOutput();
- assert.commandWorked(
- conn.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn"}));
-
- // Unpause the index build from the first failpoint so that it can resume and pause at the
- // next failpoint.
- IndexBuildTest.resumeIndexBuilds(conn);
- assert.soon(
- () => rawMongoProgramOutput().indexOf("Hanging after index build first drain") >= 0);
-
- // Right before the index build completes, ensure no prepare conflicts were hit.
- IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId, (op) => {
- printjson(op);
- assert.eq(undefined, op.prepareReadConflicts);
- });
-
- // Because prepare uses w:1, ensure it is majority committed before committing the
- // transaction.
- PrepareHelpers.awaitMajorityCommitted(replSetTest, prepareTimestamp);
-
- // Commit the transaction before completing the index build, releasing locks which will
- // allow the index build to complete.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- // Allow the index build to complete.
- assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "off"}));
-
- awaitBuild();
- IndexBuildTest.waitForIndexBuildToStop(testDB, collName, "i_1");
-
- assert.eq(numDocs, coll.count());
- assert.eq(numDocs, coll.find().itcount());
- };
-
- runTest(replSetTest.getPrimary());
-
- replSetTest.stopSet();
+
+ // Because prepare uses w:1, ensure it is majority committed before committing the
+ // transaction.
+ PrepareHelpers.awaitMajorityCommitted(replSetTest, prepareTimestamp);
+
+ // Commit the transaction before completing the index build, releasing locks which will
+ // allow the index build to complete.
+ assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+ // Allow the index build to complete.
+ assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+
+ awaitBuild();
+ IndexBuildTest.waitForIndexBuildToStop(testDB, collName, "i_1");
+
+ assert.eq(numDocs, coll.count());
+ assert.eq(numDocs, coll.find().itcount());
+};
+
+runTest(replSetTest.getPrimary());
+
+replSetTest.stopSet();
})();
diff --git a/jstests/noPassthrough/index_killop_standalone.js b/jstests/noPassthrough/index_killop_standalone.js
index fd1e4662859..2bb2376237b 100644
--- a/jstests/noPassthrough/index_killop_standalone.js
+++ b/jstests/noPassthrough/index_killop_standalone.js
@@ -2,47 +2,47 @@
* Confirms that both foreground and background index builds can be aborted using killop.
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("test");
- assert.commandWorked(testDB.dropDatabase());
- assert.writeOK(testDB.test.insert({a: 1}));
- const coll = testDB.test;
+const testDB = conn.getDB("test");
+assert.commandWorked(testDB.dropDatabase());
+assert.writeOK(testDB.test.insert({a: 1}));
+const coll = testDB.test;
- // Test that building an index with 'options' can be aborted using killop.
- function testAbortIndexBuild(options) {
- IndexBuildTest.pauseIndexBuilds(conn);
+// Test that building an index with 'options' can be aborted using killop.
+function testAbortIndexBuild(options) {
+ IndexBuildTest.pauseIndexBuilds(conn);
- const createIdx = IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {a: 1}, options);
+ const createIdx = IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {a: 1}, options);
- // When the index build starts, find its op id.
- const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
+ // When the index build starts, find its op id.
+ const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
- // Kill the index build.
- assert.commandWorked(testDB.killOp(opId));
+ // Kill the index build.
+ assert.commandWorked(testDB.killOp(opId));
- // Wait for the index build to stop.
- try {
- IndexBuildTest.waitForIndexBuildToStop(testDB);
- } finally {
- IndexBuildTest.resumeIndexBuilds(conn);
- }
+ // Wait for the index build to stop.
+ try {
+ IndexBuildTest.waitForIndexBuildToStop(testDB);
+ } finally {
+ IndexBuildTest.resumeIndexBuilds(conn);
+ }
- const exitCode = createIdx({checkExitSuccess: false});
- assert.neq(
- 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
+ const exitCode = createIdx({checkExitSuccess: false});
+ assert.neq(
+ 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
- // Check that no new index has been created. This verifies that the index build was aborted
- // rather than successfully completed.
- IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
- }
+ // Check that no new index has been created. This verifies that the index build was aborted
+ // rather than successfully completed.
+ IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
+}
- testAbortIndexBuild({background: true});
- testAbortIndexBuild({background: false});
- MongoRunner.stopMongod(conn);
+testAbortIndexBuild({background: true});
+testAbortIndexBuild({background: false});
+MongoRunner.stopMongod(conn);
})();
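killOp targets an opid obtained from currentOp. The test uses the IndexBuildTest helpers for this; a hand-rolled sketch of the same idea (the currentOp filter shape here is an assumption, not the helper's actual code) would be:

    // Find an in-progress createIndexes operation on the collection and kill it by opid.
    const inprog = testDB.currentOp(true).inprog;
    const buildOps = inprog.filter(op => op.command && op.command.createIndexes === coll.getName());
    if (buildOps.length > 0) {
        assert.commandWorked(testDB.killOp(buildOps[0].opid));
    }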
diff --git a/jstests/noPassthrough/index_partial_no_explain_cmds.js b/jstests/noPassthrough/index_partial_no_explain_cmds.js
index 5083ab2881e..f1295e5531c 100644
--- a/jstests/noPassthrough/index_partial_no_explain_cmds.js
+++ b/jstests/noPassthrough/index_partial_no_explain_cmds.js
@@ -1,57 +1,57 @@
// Test partial indexes with commands that don't use explain. These commands are tested against
// mongod with the --notablescan flag set, so that they fail if the index is not used.
(function() {
- "use strict";
- var runner = MongoRunner.runMongod({setParameter: "notablescan=1"});
- var coll = runner.getDB("test").index_partial_no_explain_cmds;
- var ret;
-
- coll.drop();
-
- assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}}));
-
- assert.writeOK(coll.insert({_id: 1, x: 5, a: 2})); // Not in index.
- assert.writeOK(coll.insert({_id: 2, x: 6, a: 1})); // In index.
-
- // Verify we will throw if the partial index can't be used.
- assert.throws(function() {
- coll.find({x: {$gt: 1}, a: 2}).itcount();
- });
-
- //
- // Test mapReduce.
- //
-
- var mapFunc = function() {
- emit(this._id, 1);
- };
- var reduceFunc = function(keyId, countArray) {
- return Array.sum(countArray);
- };
-
- ret = coll.mapReduce(mapFunc, reduceFunc, {out: "inline", query: {x: {$gt: 1}, a: 1}});
- assert.eq(1, ret.counts.input);
-
- //
- // Test distinct.
- //
-
- ret = coll.distinct("a", {x: {$gt: 1}, a: 1});
- assert.eq(1, ret.length);
- ret = coll.distinct("x", {x: {$gt: 1}, a: 1});
- assert.eq(1, ret.length);
- assert.throws(function() {
- printjson(coll.distinct("a", {a: 0}));
- });
- assert.throws(function() {
- printjson(coll.distinct("x", {a: 0}));
- });
-
- // SERVER-19511 regression test: distinct with no query predicate should return the correct
- // number of results. This query should not be allowed to use the partial index, so it should
- // use a collection scan instead. Although this test enables --notablescan, this does not cause
- // operations to fail if they have no query predicate.
- ret = coll.distinct("x");
- assert.eq(2, ret.length);
- MongoRunner.stopMongod(runner);
+"use strict";
+var runner = MongoRunner.runMongod({setParameter: "notablescan=1"});
+var coll = runner.getDB("test").index_partial_no_explain_cmds;
+var ret;
+
+coll.drop();
+
+assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}}));
+
+assert.writeOK(coll.insert({_id: 1, x: 5, a: 2})); // Not in index.
+assert.writeOK(coll.insert({_id: 2, x: 6, a: 1})); // In index.
+
+// Verify we will throw if the partial index can't be used.
+assert.throws(function() {
+ coll.find({x: {$gt: 1}, a: 2}).itcount();
+});
+
+//
+// Test mapReduce.
+//
+
+var mapFunc = function() {
+ emit(this._id, 1);
+};
+var reduceFunc = function(keyId, countArray) {
+ return Array.sum(countArray);
+};
+
+ret = coll.mapReduce(mapFunc, reduceFunc, {out: "inline", query: {x: {$gt: 1}, a: 1}});
+assert.eq(1, ret.counts.input);
+
+//
+// Test distinct.
+//
+
+ret = coll.distinct("a", {x: {$gt: 1}, a: 1});
+assert.eq(1, ret.length);
+ret = coll.distinct("x", {x: {$gt: 1}, a: 1});
+assert.eq(1, ret.length);
+assert.throws(function() {
+ printjson(coll.distinct("a", {a: 0}));
+});
+assert.throws(function() {
+ printjson(coll.distinct("x", {a: 0}));
+});
+
+// SERVER-19511 regression test: distinct with no query predicate should return the correct
+// number of results. This query should not be allowed to use the partial index, so it should
+// use a collection scan instead. Although this test enables --notablescan, this does not cause
+// operations to fail if they have no query predicate.
+ret = coll.distinct("x");
+assert.eq(2, ret.length);
+MongoRunner.stopMongod(runner);
})();
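For reference, the eligibility rule this test exercises: a partial index only contains documents matching its partialFilterExpression, so only queries whose predicate is subsumed by that filter may use it (the collection name below is illustrative):

    db.orders.createIndex({total: 1}, {partialFilterExpression: {status: "A"}});
    db.orders.find({total: {$gt: 10}, status: "A"}).itcount();  // eligible to use the partial index
    db.orders.find({total: {$gt: 10}}).itcount();               // not eligible; requires a collection scan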
diff --git a/jstests/noPassthrough/index_version_autoupgrade.js b/jstests/noPassthrough/index_version_autoupgrade.js
index fef289ddca5..9a8769da7e4 100644
--- a/jstests/noPassthrough/index_version_autoupgrade.js
+++ b/jstests/noPassthrough/index_version_autoupgrade.js
@@ -3,138 +3,135 @@
* indexes when they are rebuilt on a collection.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- var conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
+var conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
- var testDB = conn.getDB("test");
- assert.commandWorked(testDB.runCommand({create: "index_version_autoupgrade"}));
- var allIndexes = testDB.index_version_autoupgrade.getIndexes();
+var testDB = conn.getDB("test");
+assert.commandWorked(testDB.runCommand({create: "index_version_autoupgrade"}));
+var allIndexes = testDB.index_version_autoupgrade.getIndexes();
+var spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "Index with key pattern {_id: 1} not found: " + tojson(allIndexes));
+var defaultIndexVersion = spec.v;
+assert.lte(2, defaultIndexVersion, "Expected the defaultIndexVersion to be at least v=2");
+
+/**
+ * Tests whether the execution of the 'commandFn' function automatically upgrades the index
+ * version of existing indexes.
+ *
+ * The 'commandFn' function takes a single argument of the collection to act on and returns a
+ * collection to validate the index versions of. Most often the 'commandFn' function returns
+ * its input collection, but is able to return a reference to a different collection to support
+ * testing the effects of cloning commands.
+ *
+ * If 'doesAutoUpgrade' is true, then this function verifies that the indexes on the returned
+ * collection have been upgraded to the 'defaultIndexVersion'. If 'doesAutoUpgrade' is false,
+ * then this function verifies that the indexes on the returned collection are unchanged.
+ */
+function testIndexVersionAutoUpgrades(commandFn, doesAutoUpgrade) {
+ testDB.dropDatabase();
+ var coll = testDB.index_version_autoupgrade;
+
+ // Create a v=1 _id index.
+ assert.commandWorked(testDB.createCollection("index_version_autoupgrade",
+ {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
+ var allIndexes = coll.getIndexes();
var spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
assert.neq(null, spec, "Index with key pattern {_id: 1} not found: " + tojson(allIndexes));
- var defaultIndexVersion = spec.v;
- assert.lte(2, defaultIndexVersion, "Expected the defaultIndexVersion to be at least v=2");
-
- /**
- * Tests whether the execution of the 'commandFn' function automatically upgrades the index
- * version of existing indexes.
- *
- * The 'commandFn' function takes a single argument of the collection to act on and returns a
- * collection to validate the index versions of. Most often the 'commandFn' function returns
- * its input collection, but is able to return a reference to a different collection to support
- * testing the effects of cloning commands.
- *
- * If 'doesAutoUpgrade' is true, then this function verifies that the indexes on the returned
- * collection have been upgraded to the 'defaultIndexVersion'. If 'doesAutoUpgrade' is false,
- * then this function verifies that the indexes on the returned collection are unchanged.
- */
- function testIndexVersionAutoUpgrades(commandFn, doesAutoUpgrade) {
- testDB.dropDatabase();
- var coll = testDB.index_version_autoupgrade;
-
- // Create a v=1 _id index.
- assert.commandWorked(testDB.createCollection(
- "index_version_autoupgrade", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
- var allIndexes = coll.getIndexes();
- var spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "Index with key pattern {_id: 1} not found: " + tojson(allIndexes));
- assert.eq(1, spec.v, "Expected a v=1 index to be built: " + tojson(spec));
-
- assert.commandWorked(coll.createIndex({withoutAnyOptions: 1}));
- allIndexes = coll.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withoutAnyOptions: 1});
- assert.neq(
- null,
- spec,
- "Index with key pattern {withoutAnyOptions: 1} not found: " + tojson(allIndexes));
- assert.eq(defaultIndexVersion,
- spec.v,
- "Expected an index with the default version to be built: " + tojson(spec));
-
- assert.commandWorked(coll.createIndex({withV1: 1}, {v: 1}));
- allIndexes = coll.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withV1: 1});
- assert.neq(
- null, spec, "Index with key pattern {withV1: 1} not found: " + tojson(allIndexes));
- assert.eq(1, spec.v, "Expected a v=1 index to be built: " + tojson(spec));
-
- assert.commandWorked(coll.createIndex({withV2: 1}, {v: 2}));
- allIndexes = coll.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withV2: 1});
- assert.neq(
- null, spec, "Index with key pattern {withV2: 1} not found: " + tojson(allIndexes));
- assert.eq(2, spec.v, "Expected a v=2 index to be built: " + tojson(spec));
-
- var collToVerify = commandFn(coll);
- var expectedResults;
-
- if (doesAutoUpgrade) {
- expectedResults = [
- {keyPattern: {_id: 1}, version: defaultIndexVersion},
- {keyPattern: {withoutAnyOptions: 1}, version: defaultIndexVersion},
- {keyPattern: {withV1: 1}, version: defaultIndexVersion},
- {keyPattern: {withV2: 1}, version: defaultIndexVersion},
- ];
-
- } else {
- expectedResults = [
- {keyPattern: {_id: 1}, version: 1},
- {keyPattern: {withoutAnyOptions: 1}, version: defaultIndexVersion},
- {keyPattern: {withV1: 1}, version: 1},
- {keyPattern: {withV2: 1}, version: 2},
- ];
- }
-
- expectedResults.forEach(function(expected) {
- var allIndexes = collToVerify.getIndexes();
- var spec = GetIndexHelpers.findByKeyPattern(allIndexes, expected.keyPattern);
- assert.neq(null,
- spec,
- "Index with key pattern " + tojson(expected.keyPattern) + " not found: " +
- tojson(allIndexes));
- assert.eq(expected.version,
- spec.v,
- "Expected index to be rebuilt with " +
- (doesAutoUpgrade ? "the default" : "its original") + " version: " +
- tojson(spec));
- });
+ assert.eq(1, spec.v, "Expected a v=1 index to be built: " + tojson(spec));
+
+ assert.commandWorked(coll.createIndex({withoutAnyOptions: 1}));
+ allIndexes = coll.getIndexes();
+ spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withoutAnyOptions: 1});
+ assert.neq(null,
+ spec,
+ "Index with key pattern {withoutAnyOptions: 1} not found: " + tojson(allIndexes));
+ assert.eq(defaultIndexVersion,
+ spec.v,
+ "Expected an index with the default version to be built: " + tojson(spec));
+
+ assert.commandWorked(coll.createIndex({withV1: 1}, {v: 1}));
+ allIndexes = coll.getIndexes();
+ spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withV1: 1});
+ assert.neq(null, spec, "Index with key pattern {withV1: 1} not found: " + tojson(allIndexes));
+ assert.eq(1, spec.v, "Expected a v=1 index to be built: " + tojson(spec));
+
+ assert.commandWorked(coll.createIndex({withV2: 1}, {v: 2}));
+ allIndexes = coll.getIndexes();
+ spec = GetIndexHelpers.findByKeyPattern(allIndexes, {withV2: 1});
+ assert.neq(null, spec, "Index with key pattern {withV2: 1} not found: " + tojson(allIndexes));
+ assert.eq(2, spec.v, "Expected a v=2 index to be built: " + tojson(spec));
+
+ var collToVerify = commandFn(coll);
+ var expectedResults;
+
+ if (doesAutoUpgrade) {
+ expectedResults = [
+ {keyPattern: {_id: 1}, version: defaultIndexVersion},
+ {keyPattern: {withoutAnyOptions: 1}, version: defaultIndexVersion},
+ {keyPattern: {withV1: 1}, version: defaultIndexVersion},
+ {keyPattern: {withV2: 1}, version: defaultIndexVersion},
+ ];
+
+ } else {
+ expectedResults = [
+ {keyPattern: {_id: 1}, version: 1},
+ {keyPattern: {withoutAnyOptions: 1}, version: defaultIndexVersion},
+ {keyPattern: {withV1: 1}, version: 1},
+ {keyPattern: {withV2: 1}, version: 2},
+ ];
}
- // Test that the "reIndex" command upgrades all existing indexes to the latest version.
- testIndexVersionAutoUpgrades(function(coll) {
- assert.commandWorked(coll.getDB().runCommand({reIndex: coll.getName()}));
- return coll;
- }, true);
-
- // Test that the "compact" command doesn't upgrade existing indexes to the latest version.
- testIndexVersionAutoUpgrades(function(coll) {
- var res = coll.getDB().runCommand({compact: coll.getName()});
- if (res.ok === 0) {
- // Ephemeral storage engines don't support the "compact" command. The existing indexes
- // should remain unchanged.
- assert.commandFailedWithCode(res, ErrorCodes.CommandNotSupported);
- } else {
- assert.commandWorked(res);
- }
- return coll;
- }, false);
-
- // Test that the "cloneCollection" command doesn't upgrade existing indexes to the latest
- // version.
- var cloneConn = MongoRunner.runMongod({});
- assert.neq(null, cloneConn, "mongod was unable to start up");
- testIndexVersionAutoUpgrades(function(coll) {
- var cloneDB = cloneConn.getDB(coll.getDB().getName());
- assert.commandWorked(cloneDB.runCommand({
- cloneCollection: coll.getFullName(),
- from: conn.host,
- }));
- return cloneDB[coll.getName()];
- }, false);
- MongoRunner.stopMongod(cloneConn);
-
- MongoRunner.stopMongod(conn);
+ expectedResults.forEach(function(expected) {
+ var allIndexes = collToVerify.getIndexes();
+ var spec = GetIndexHelpers.findByKeyPattern(allIndexes, expected.keyPattern);
+ assert.neq(null,
+ spec,
+ "Index with key pattern " + tojson(expected.keyPattern) +
+ " not found: " + tojson(allIndexes));
+ assert.eq(expected.version,
+ spec.v,
+ "Expected index to be rebuilt with " +
+ (doesAutoUpgrade ? "the default" : "its original") +
+ " version: " + tojson(spec));
+ });
+}
+
+// Test that the "reIndex" command upgrades all existing indexes to the latest version.
+testIndexVersionAutoUpgrades(function(coll) {
+ assert.commandWorked(coll.getDB().runCommand({reIndex: coll.getName()}));
+ return coll;
+}, true);
+
+// Test that the "compact" command doesn't upgrade existing indexes to the latest version.
+testIndexVersionAutoUpgrades(function(coll) {
+ var res = coll.getDB().runCommand({compact: coll.getName()});
+ if (res.ok === 0) {
+ // Ephemeral storage engines don't support the "compact" command. The existing indexes
+ // should remain unchanged.
+ assert.commandFailedWithCode(res, ErrorCodes.CommandNotSupported);
+ } else {
+ assert.commandWorked(res);
+ }
+ return coll;
+}, false);
+
+// Test that the "cloneCollection" command doesn't upgrade existing indexes to the latest
+// version.
+var cloneConn = MongoRunner.runMongod({});
+assert.neq(null, cloneConn, "mongod was unable to start up");
+testIndexVersionAutoUpgrades(function(coll) {
+ var cloneDB = cloneConn.getDB(coll.getDB().getName());
+ assert.commandWorked(cloneDB.runCommand({
+ cloneCollection: coll.getFullName(),
+ from: conn.host,
+ }));
+ return cloneDB[coll.getName()];
+}, false);
+MongoRunner.stopMongod(cloneConn);
+
+MongoRunner.stopMongod(conn);
})();
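The behavior under test can be reproduced by hand with a couple of shell calls (the collection name is illustrative; the reIndex effect is what the test asserts):

    // Request a specific index version at creation time, then read it back from getIndexes().
    db.events.createIndex({ts: 1}, {v: 1});
    db.events.getIndexes().forEach(spec => print(spec.name + " v=" + spec.v));
    // reIndex rebuilds all indexes at the current default version (v=2 here).
    db.runCommand({reIndex: "events"});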
diff --git a/jstests/noPassthrough/index_version_v2.js b/jstests/noPassthrough/index_version_v2.js
index b3fe65841d3..886c7c39590 100644
--- a/jstests/noPassthrough/index_version_v2.js
+++ b/jstests/noPassthrough/index_version_v2.js
@@ -6,119 +6,116 @@
* the KeyString format.
*/
(function() {
- "use strict";
-
- const storageEnginesUsingKeyString = new Set(["wiredTiger", "inMemory", "rocksdb"]);
-
- function getIndexSpecByName(coll, indexName) {
- const indexes = coll.getIndexes();
- const indexesFilteredByName = indexes.filter(spec => spec.name === indexName);
- assert.eq(1,
- indexesFilteredByName.length,
- "index '" + indexName + "' not found: " + tojson(indexes));
- return indexesFilteredByName[0];
- }
-
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
-
- const testDB = conn.getDB("test");
- const storageEngine = testDB.serverStatus().storageEngine.name;
-
- //
- // Index version v=2
- //
-
- testDB.dropDatabase();
-
- // Test that the _id index of a collection is created with v=2 by default.
- assert.commandWorked(testDB.runCommand({create: "index_version"}));
- let indexSpec = getIndexSpecByName(testDB.index_version, "_id_");
- assert.eq(2, indexSpec.v, tojson(indexSpec));
-
- // Test that an index created on an existing collection is created with v=2 by default.
- assert.commandWorked(testDB.index_version.createIndex({defaultToV2: 1}, {name: "defaultToV2"}));
- indexSpec = getIndexSpecByName(testDB.index_version, "defaultToV2");
- assert.eq(2, indexSpec.v, tojson(indexSpec));
-
- // Test that creating an index with v=2 succeeds.
- assert.commandWorked(testDB.index_version.createIndex({withV2: 1}, {v: 2, name: "withV2"}));
- indexSpec = getIndexSpecByName(testDB.index_version, "withV2");
- assert.eq(2, indexSpec.v, tojson(indexSpec));
-
- // Test that creating a collection with a non-simple default collation succeeds.
- assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
- indexSpec = getIndexSpecByName(testDB.collation, "_id_");
- assert.eq(2, indexSpec.v, tojson(indexSpec));
-
- // Test that creating an index with a non-simple collation succeeds.
- assert.commandWorked(
- testDB.collation.createIndex({str: 1}, {name: "withCollation", collation: {locale: "fr"}}));
- indexSpec = getIndexSpecByName(testDB.collation, "withCollation");
- assert.eq(2, indexSpec.v, tojson(indexSpec));
-
- // Test that indexing decimal data succeeds.
- assert.writeOK(testDB.decimal.insert({_id: new NumberDecimal("42")}));
-
- //
- // Index version v=1
- //
-
- testDB.dropDatabase();
-
- // Test that creating an index with v=1 succeeds.
- assert.commandWorked(testDB.index_version.createIndex({withV1: 1}, {v: 1, name: "withV1"}));
- indexSpec = getIndexSpecByName(testDB.index_version, "withV1");
- assert.eq(1, indexSpec.v, tojson(indexSpec));
-
- // Test that creating an index with v=1 and a simple collation returns an error.
- assert.commandFailed(
- testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "simple"}}));
-
- // Test that creating an index with v=1 and a non-simple collation returns an error.
- assert.commandFailed(
- testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "en", strength: 2}}));
-
- // Test that creating an index with v=1 and a simple collation on a collection with a non-simple
- // default collation returns an error.
- testDB.collation.drop();
- assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
- assert.commandFailed(
- testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "simple"}}));
-
- // Test that creating an index with v=1 and a non-simple collation on a collection with a
- // non-simple default collation returns an error.
- testDB.collation.drop();
- assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
- assert.commandFailed(
- testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "en", strength: 2}}));
-
- // Test that indexing decimal data with a v=1 index returns an error on storage engines using
- // the KeyString format.
- assert.commandWorked(testDB.decimal.createIndex({num: 1}, {v: 1}));
- if (storageEnginesUsingKeyString.has(storageEngine)) {
- assert.writeErrorWithCode(testDB.decimal.insert({num: new NumberDecimal("42")}),
- ErrorCodes.UnsupportedFormat);
- } else {
- assert.writeOK(testDB.decimal.insert({num: new NumberDecimal("42")}));
- }
-
- //
- // Index version v=0
- //
-
- testDB.dropDatabase();
-
- // Test that attempting to create an index with v=0 returns an error.
- assert.commandFailed(testDB.index_version.createIndex({withV0: 1}, {v: 0}));
-
- //
- // Index version v=3
- //
-
- testDB.dropDatabase();
-
- // Test that attempting to create an index with v=3 returns an error.
- assert.commandFailed(testDB.index_version.createIndex({withV3: 1}, {v: 3}));
- MongoRunner.stopMongod(conn);
+"use strict";
+
+const storageEnginesUsingKeyString = new Set(["wiredTiger", "inMemory", "rocksdb"]);
+
+function getIndexSpecByName(coll, indexName) {
+ const indexes = coll.getIndexes();
+ const indexesFilteredByName = indexes.filter(spec => spec.name === indexName);
+ assert.eq(
+ 1, indexesFilteredByName.length, "index '" + indexName + "' not found: " + tojson(indexes));
+ return indexesFilteredByName[0];
+}
+
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+
+const testDB = conn.getDB("test");
+const storageEngine = testDB.serverStatus().storageEngine.name;
+
+//
+// Index version v=2
+//
+
+testDB.dropDatabase();
+
+// Test that the _id index of a collection is created with v=2 by default.
+assert.commandWorked(testDB.runCommand({create: "index_version"}));
+let indexSpec = getIndexSpecByName(testDB.index_version, "_id_");
+assert.eq(2, indexSpec.v, tojson(indexSpec));
+
+// Test that an index created on an existing collection is created with v=2 by default.
+assert.commandWorked(testDB.index_version.createIndex({defaultToV2: 1}, {name: "defaultToV2"}));
+indexSpec = getIndexSpecByName(testDB.index_version, "defaultToV2");
+assert.eq(2, indexSpec.v, tojson(indexSpec));
+
+// Test that creating an index with v=2 succeeds.
+assert.commandWorked(testDB.index_version.createIndex({withV2: 1}, {v: 2, name: "withV2"}));
+indexSpec = getIndexSpecByName(testDB.index_version, "withV2");
+assert.eq(2, indexSpec.v, tojson(indexSpec));
+
+// Test that creating a collection with a non-simple default collation succeeds.
+assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
+indexSpec = getIndexSpecByName(testDB.collation, "_id_");
+assert.eq(2, indexSpec.v, tojson(indexSpec));
+
+// Test that creating an index with a non-simple collation succeeds.
+assert.commandWorked(
+ testDB.collation.createIndex({str: 1}, {name: "withCollation", collation: {locale: "fr"}}));
+indexSpec = getIndexSpecByName(testDB.collation, "withCollation");
+assert.eq(2, indexSpec.v, tojson(indexSpec));
+
+// Test that indexing decimal data succeeds.
+assert.writeOK(testDB.decimal.insert({_id: new NumberDecimal("42")}));
+
+//
+// Index version v=1
+//
+
+testDB.dropDatabase();
+
+// Test that creating an index with v=1 succeeds.
+assert.commandWorked(testDB.index_version.createIndex({withV1: 1}, {v: 1, name: "withV1"}));
+indexSpec = getIndexSpecByName(testDB.index_version, "withV1");
+assert.eq(1, indexSpec.v, tojson(indexSpec));
+
+// Test that creating an index with v=1 and a simple collation returns an error.
+assert.commandFailed(testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "simple"}}));
+
+// Test that creating an index with v=1 and a non-simple collation returns an error.
+assert.commandFailed(
+ testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "en", strength: 2}}));
+
+// Test that creating an index with v=1 and a simple collation on a collection with a non-simple
+// default collation returns an error.
+testDB.collation.drop();
+assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
+assert.commandFailed(testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "simple"}}));
+
+// Test that creating an index with v=1 and a non-simple collation on a collection with a
+// non-simple default collation returns an error.
+testDB.collation.drop();
+assert.commandWorked(testDB.runCommand({create: "collation", collation: {locale: "en"}}));
+assert.commandFailed(
+ testDB.collation.createIndex({str: 1}, {v: 1, collation: {locale: "en", strength: 2}}));
+
+// Test that indexing decimal data with a v=1 index returns an error on storage engines using
+// the KeyString format.
+assert.commandWorked(testDB.decimal.createIndex({num: 1}, {v: 1}));
+if (storageEnginesUsingKeyString.has(storageEngine)) {
+ assert.writeErrorWithCode(testDB.decimal.insert({num: new NumberDecimal("42")}),
+ ErrorCodes.UnsupportedFormat);
+} else {
+ assert.writeOK(testDB.decimal.insert({num: new NumberDecimal("42")}));
+}
+
+//
+// Index version v=0
+//
+
+testDB.dropDatabase();
+
+// Test that attempting to create an index with v=0 returns an error.
+assert.commandFailed(testDB.index_version.createIndex({withV0: 1}, {v: 0}));
+
+//
+// Index version v=3
+//
+
+testDB.dropDatabase();
+
+// Test that attempting to create an index with v=3 returns an error.
+assert.commandFailed(testDB.index_version.createIndex({withV3: 1}, {v: 3}));
+MongoRunner.stopMongod(conn);
})();
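The collation restriction asserted above can be summarized in two calls (illustrative collection name):

    // An index with a non-simple collation requires index version 2; requesting v=1 is rejected.
    assert.commandWorked(db.strs.createIndex({str: 1}, {collation: {locale: "fr"}}));        // defaults to v=2
    assert.commandFailed(db.strs.createIndex({str: 1}, {v: 1, collation: {locale: "fr"}}));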
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index 7b280ed9d07..1b06b881e30 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -2,131 +2,131 @@
// @tags: [SERVER-40561]
(function() {
- "use strict";
-
- load("jstests/noPassthrough/libs/index_build.js");
-
- const conn = MongoRunner.runMongod({nojournal: ""});
- assert.neq(null, conn, "mongod failed to start.");
- var db = conn.getDB("test");
- var baseName = "jstests_indexbg1";
-
- var parallel = function() {
- return db[baseName + "_parallelStatus"];
- };
-
- var resetParallel = function() {
- parallel().drop();
- };
-
- // Return the PID to call `waitpid` on for clean shutdown.
- var doParallel = function(work) {
- resetParallel();
- print("doParallel: " + work);
- return startMongoProgramNoConnect(
- "mongo",
- "--eval",
- work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
- db.getMongo().host);
- };
-
- var doneParallel = function() {
- return !!parallel().findOne();
- };
-
- var waitParallel = function() {
- assert.soon(function() {
- return doneParallel();
- }, "parallel did not finish in time", 300000, 1000);
- };
-
- var size = 400 * 1000;
- var bgIndexBuildPid;
- while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
- print("size: " + size);
-
- var fullName = "db." + baseName;
- var t = db[baseName];
- t.drop();
-
- var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute());
+"use strict";
+
+load("jstests/noPassthrough/libs/index_build.js");
+
+const conn = MongoRunner.runMongod({nojournal: ""});
+assert.neq(null, conn, "mongod failed to start.");
+var db = conn.getDB("test");
+var baseName = "jstests_indexbg1";
+
+var parallel = function() {
+ return db[baseName + "_parallelStatus"];
+};
+
+var resetParallel = function() {
+ parallel().drop();
+};
+
+// Return the PID to call `waitpid` on for clean shutdown.
+var doParallel = function(work) {
+ resetParallel();
+ print("doParallel: " + work);
+ return startMongoProgramNoConnect(
+ "mongo",
+ "--eval",
+ work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
+ db.getMongo().host);
+};
+
+var doneParallel = function() {
+ return !!parallel().findOne();
+};
+
+var waitParallel = function() {
+ assert.soon(function() {
+ return doneParallel();
+ }, "parallel did not finish in time", 300000, 1000);
+};
+
+var size = 400 * 1000;
+var bgIndexBuildPid;
+while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
+ print("size: " + size);
+
+ var fullName = "db." + baseName;
+ var t = db[baseName];
+ t.drop();
+
+ var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp();
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
+ }
+ assert.writeOK(bulk.execute());
+ assert.eq(size, t.count());
+
+ bgIndexBuildPid = doParallel(fullName + ".ensureIndex( {i:1}, {background:true} )");
+ try {
+ // wait for indexing to start
+ print("wait for indexing to start");
+ IndexBuildTest.waitForIndexBuildToStart(db);
+ print("started.");
+ sleep(1000); // there is a race between when the index build shows up in curop and
+ // when it first attempts to grab a write lock.
assert.eq(size, t.count());
-
- bgIndexBuildPid = doParallel(fullName + ".ensureIndex( {i:1}, {background:true} )");
- try {
- // wait for indexing to start
- print("wait for indexing to start");
- IndexBuildTest.waitForIndexBuildToStart(db);
- print("started.");
- sleep(1000); // there is a race between when the index build shows up in curop and
- // when it first attempts to grab a write lock.
- assert.eq(size, t.count());
- assert.eq(100, t.findOne({i: 100}).i);
- var q = t.find();
- for (i = 0; i < 120; ++i) { // getmore
- q.next();
- assert(q.hasNext(), "no next");
- }
- var ex = t.find({i: 100}).limit(-1).explain("executionStats");
- printjson(ex);
- assert(ex.executionStats.totalKeysExamined < 1000,
- "took too long to find 100: " + tojson(ex));
-
- assert.writeOK(t.remove({i: 40}, true)); // table scan
- assert.writeOK(t.update({i: 10}, {i: -10})); // should scan 10
-
- var id = t.find().hint({$natural: -1}).next()._id;
-
- assert.writeOK(t.update({_id: id}, {i: -2}));
- assert.writeOK(t.save({i: -50}));
- assert.writeOK(t.save({i: size + 2}));
-
- assert.eq(size + 1, t.count());
-
- print("finished with checks");
- } catch (e) {
- // only a failure if we're still indexing
- // wait for parallel status to update to reflect indexing status
- print("caught exception: " + e);
- sleep(1000);
- if (!doneParallel()) {
- throw e;
- }
- print("but that's OK");
+ assert.eq(100, t.findOne({i: 100}).i);
+ var q = t.find();
+ for (i = 0; i < 120; ++i) { // getmore
+ q.next();
+ assert(q.hasNext(), "no next");
}
+ var ex = t.find({i: 100}).limit(-1).explain("executionStats");
+ printjson(ex);
+ assert(ex.executionStats.totalKeysExamined < 1000,
+ "took too long to find 100: " + tojson(ex));
+
+ assert.writeOK(t.remove({i: 40}, true)); // table scan
+ assert.writeOK(t.update({i: 10}, {i: -10})); // should scan 10
- print("going to check if index is done");
+ var id = t.find().hint({$natural: -1}).next()._id;
+
+ assert.writeOK(t.update({_id: id}, {i: -2}));
+ assert.writeOK(t.save({i: -50}));
+ assert.writeOK(t.save({i: size + 2}));
+
+ assert.eq(size + 1, t.count());
+
+ print("finished with checks");
+ } catch (e) {
+ // only a failure if we're still indexing
+ // wait for parallel status to update to reflect indexing status
+ print("caught exception: " + e);
+ sleep(1000);
if (!doneParallel()) {
- break;
+ throw e;
}
- print("indexing finished too soon, retrying...");
- // Although the index build finished, ensure the shell has exited.
- waitProgram(bgIndexBuildPid);
- size *= 2;
- assert(size < 200000000, "unable to run checks in parallel with index creation");
+ print("but that's OK");
}
- print("our tests done, waiting for parallel to finish");
- waitParallel();
- // Ensure the shell has exited cleanly. Otherwise the test harness may send a SIGTERM which can
- // lead to a false test failure.
+ print("going to check if index is done");
+ if (!doneParallel()) {
+ break;
+ }
+ print("indexing finished too soon, retrying...");
+ // Although the index build finished, ensure the shell has exited.
waitProgram(bgIndexBuildPid);
- print("finished");
-
- assert.eq(1, t.count({i: -10}));
- assert.eq(1, t.count({i: -2}));
- assert.eq(1, t.count({i: -50}));
- assert.eq(1, t.count({i: size + 2}));
- assert.eq(0, t.count({i: 40}));
- print("about to drop index");
- t.dropIndex({i: 1});
- var gle = db.getLastError();
- printjson(gle);
- assert(!gle);
-
- MongoRunner.stopMongod(conn);
+ size *= 2;
+ assert(size < 200000000, "unable to run checks in parallel with index creation");
+}
+
+print("our tests done, waiting for parallel to finish");
+waitParallel();
+// Ensure the shell has exited cleanly. Otherwise the test harness may send a SIGTERM which can
+// lead to a false test failure.
+waitProgram(bgIndexBuildPid);
+print("finished");
+
+assert.eq(1, t.count({i: -10}));
+assert.eq(1, t.count({i: -2}));
+assert.eq(1, t.count({i: -50}));
+assert.eq(1, t.count({i: size + 2}));
+assert.eq(0, t.count({i: 40}));
+print("about to drop index");
+t.dropIndex({i: 1});
+var gle = db.getLastError();
+printjson(gle);
+assert(!gle);
+
+MongoRunner.stopMongod(conn);
})();
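The parallel-shell plumbing above reduces to the following pattern, using the same startMongoProgramNoConnect/waitProgram shell helpers as the test (the --eval body is illustrative):

    // Launch a separate mongo shell to build the index in the background, then reap
    // the process so the test harness sees a clean exit.
    const pid = startMongoProgramNoConnect(
        "mongo", "--eval", "db.jstests_indexbg1.ensureIndex({i: 1}, {background: true})", db.getMongo().host);
    // ... interleave reads and writes against db.jstests_indexbg1 here ...
    waitProgram(pid);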
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index a037dc97dd3..c7a119048ec 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -2,156 +2,156 @@
// @tags: [SERVER-40561, requires_document_locking]
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/check_log.js");
+
+const conn = MongoRunner.runMongod({nojournal: ""});
+assert.neq(null, conn, "mongod failed to start.");
+
+let db = conn.getDB("test");
+let baseName = "jstests_index12";
+
+let parallel = function() {
+ return db[baseName + "_parallelStatus"];
+};
+
+let resetParallel = function() {
+ parallel().drop();
+};
+
+// Return the PID to call `waitpid` on for clean shutdown.
+let doParallel = function(work) {
+ resetParallel();
+ return startMongoProgramNoConnect(
+ "mongo",
+ "--eval",
+ work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
+ db.getMongo().host);
+};
+
+let indexBuild = function() {
+ let fullName = "db." + baseName;
+ return doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true} )");
+};
+
+let doneParallel = function() {
+ return !!parallel().findOne();
+};
+
+let waitParallel = function() {
+ assert.soon(function() {
+ return doneParallel();
+ }, "parallel did not finish in time", 300000, 1000);
+};
+
+let turnFailPointOn = function(failPointName, i) {
+ assert.commandWorked(
+ conn.adminCommand({configureFailPoint: failPointName, mode: "alwaysOn", data: {"i": i}}));
+};
+
+let turnFailPointOff = function(failPointName) {
+ assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+};
+
+// Unique background index build fails when there exist duplicate indexed values
+// for the duration of the build.
+let failOnExistingDuplicateValue = function(coll) {
+ let duplicateKey = 0;
+ assert.writeOK(coll.save({i: duplicateKey}));
+
+ let bgIndexBuildPid = indexBuild();
+ waitProgram(bgIndexBuildPid);
+ assert.eq(1, coll.getIndexes().length, "Index should fail. There exist duplicate values.");
+
+ // Revert to unique key set
+ coll.deleteOne({i: duplicateKey});
+};
+
+// Unique background index build fails when started with a unique key set,
+// but a document with a duplicate key is inserted prior to that key being indexed.
+let failOnInsertedDuplicateValue = function(coll) {
+ let duplicateKey = 7;
+
+ turnFailPointOn("hangBeforeIndexBuildOf", duplicateKey);
+
+ let bgIndexBuildPid;
+ try {
+ bgIndexBuildPid = indexBuild();
+ jsTestLog("Waiting to hang before index build of i=" + duplicateKey);
+ checkLog.contains(conn, "Hanging before index build of i=" + duplicateKey);
- load("jstests/libs/check_log.js");
-
- const conn = MongoRunner.runMongod({nojournal: ""});
- assert.neq(null, conn, "mongod failed to start.");
-
- let db = conn.getDB("test");
- let baseName = "jstests_index12";
-
- let parallel = function() {
- return db[baseName + "_parallelStatus"];
- };
-
- let resetParallel = function() {
- parallel().drop();
- };
-
- // Return the PID to call `waitpid` on for clean shutdown.
- let doParallel = function(work) {
- resetParallel();
- return startMongoProgramNoConnect(
- "mongo",
- "--eval",
- work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
- db.getMongo().host);
- };
-
- let indexBuild = function() {
- let fullName = "db." + baseName;
- return doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true} )");
- };
-
- let doneParallel = function() {
- return !!parallel().findOne();
- };
-
- let waitParallel = function() {
- assert.soon(function() {
- return doneParallel();
- }, "parallel did not finish in time", 300000, 1000);
- };
-
- let turnFailPointOn = function(failPointName, i) {
- assert.commandWorked(conn.adminCommand(
- {configureFailPoint: failPointName, mode: "alwaysOn", data: {"i": i}}));
- };
-
- let turnFailPointOff = function(failPointName) {
- assert.commandWorked(conn.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- };
-
- // Unique background index build fails when there exists duplicate indexed values
- // for the duration of the build.
- let failOnExistingDuplicateValue = function(coll) {
- let duplicateKey = 0;
assert.writeOK(coll.save({i: duplicateKey}));
+ } finally {
+ turnFailPointOff("hangBeforeIndexBuildOf");
+ }
- let bgIndexBuildPid = indexBuild();
- waitProgram(bgIndexBuildPid);
- assert.eq(1, coll.getIndexes().length, "Index should fail. There exist duplicate values.");
-
- // Revert to unique key set
- coll.deleteOne({i: duplicateKey});
- };
-
- // Unique background index build fails when started with a unique key set,
- // but a document with a duplicate key is inserted prior to that key being indexed.
- let failOnInsertedDuplicateValue = function(coll) {
- let duplicateKey = 7;
-
- turnFailPointOn("hangBeforeIndexBuildOf", duplicateKey);
+ waitProgram(bgIndexBuildPid);
+ assert.eq(1,
+ coll.getIndexes().length,
+ "Index should fail. Duplicate key is inserted prior to that key being indexed.");
- let bgIndexBuildPid;
- try {
- bgIndexBuildPid = indexBuild();
- jsTestLog("Waiting to hang before index build of i=" + duplicateKey);
- checkLog.contains(conn, "Hanging before index build of i=" + duplicateKey);
+ // Revert to unique key set
+ coll.deleteOne({i: duplicateKey});
+};
- assert.writeOK(coll.save({i: duplicateKey}));
- } finally {
- turnFailPointOff("hangBeforeIndexBuildOf");
- }
+// Unique background index build succeeds:
+// 1) when a document is inserted and removed with a key that has already been indexed
+// 2) when a document with a key not present in the initial set is inserted and removed
+let succeedWithoutWriteErrors = function(coll, newKey) {
+ let duplicateKey = 3;
- waitProgram(bgIndexBuildPid);
- assert.eq(1,
- coll.getIndexes().length,
- "Index should fail. Duplicate key is inserted prior to that key being indexed.");
+ turnFailPointOn("hangAfterIndexBuildOf", duplicateKey);
- // Revert to unique key set
- coll.deleteOne({i: duplicateKey});
- };
+ let bgIndexBuildPid;
+ try {
+ bgIndexBuildPid = indexBuild();
- // Unique background index build succeeds:
- // 1) when a document is inserted and removed with a key that has already been indexed
- // 2) when a document with a key not present in the initial set is inserted and removed
- let succeedWithoutWriteErrors = function(coll, newKey) {
- let duplicateKey = 3;
+ jsTestLog("Waiting to hang after index build of i=" + duplicateKey);
+ checkLog.contains(conn, "Hanging after index build of i=" + duplicateKey);
- turnFailPointOn("hangAfterIndexBuildOf", duplicateKey);
+ assert.commandWorked(coll.insert({i: duplicateKey, n: true}));
- let bgIndexBuildPid;
- try {
- bgIndexBuildPid = indexBuild();
+ // First insert on key not present in initial set.
+ assert.commandWorked(coll.insert({i: newKey, n: true}));
- jsTestLog("Waiting to hang after index build of i=" + duplicateKey);
- checkLog.contains(conn, "Hanging after index build of i=" + duplicateKey);
+ // Remove duplicates before completing the index build.
+ assert.commandWorked(coll.deleteOne({i: duplicateKey, n: true}));
+ assert.commandWorked(coll.deleteOne({i: newKey, n: true}));
- assert.commandWorked(coll.insert({i: duplicateKey, n: true}));
+ } finally {
+ turnFailPointOff("hangAfterIndexBuildOf");
+ }
- // First insert on key not present in initial set.
- assert.commandWorked(coll.insert({i: newKey, n: true}));
+ waitProgram(bgIndexBuildPid);
+ assert.eq(2, coll.getIndexes().length, "Index build should succeed");
+};
- // Remove duplicates before completing the index build.
- assert.commandWorked(coll.deleteOne({i: duplicateKey, n: true}));
- assert.commandWorked(coll.deleteOne({i: newKey, n: true}));
-
- } finally {
- turnFailPointOff("hangAfterIndexBuildOf");
- }
-
- waitProgram(bgIndexBuildPid);
- assert.eq(2, coll.getIndexes().length, "Index build should succeed");
- };
-
- let doTest = function() {
- "use strict";
- const size = 10;
+let doTest = function() {
+ "use strict";
+ const size = 10;
- let coll = db[baseName];
- coll.drop();
+ let coll = db[baseName];
+ coll.drop();
- for (let i = 0; i < size; ++i) {
- assert.writeOK(coll.save({i: i}));
- }
- assert.eq(size, coll.count());
- assert.eq(1, coll.getIndexes().length, "_id index should already exist");
+ for (let i = 0; i < size; ++i) {
+ assert.writeOK(coll.save({i: i}));
+ }
+ assert.eq(size, coll.count());
+ assert.eq(1, coll.getIndexes().length, "_id index should already exist");
- failOnExistingDuplicateValue(coll);
- assert.eq(size, coll.count());
+ failOnExistingDuplicateValue(coll);
+ assert.eq(size, coll.count());
- failOnInsertedDuplicateValue(coll);
- assert.eq(size, coll.count());
+ failOnInsertedDuplicateValue(coll);
+ assert.eq(size, coll.count());
- succeedWithoutWriteErrors(coll, size);
+ succeedWithoutWriteErrors(coll, size);
- waitParallel();
- };
+ waitParallel();
+};
- doTest();
+doTest();
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
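The failpoints used above accept a data payload, which this test uses to hang on one specific key and then watch the server log for the matching message; the core pattern is:

    // Hang the index build just before it indexes the document with i=7, wait for the
    // log line, then release it.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "hangBeforeIndexBuildOf", mode: "alwaysOn", data: {i: 7}}));
    checkLog.contains(conn, "Hanging before index build of i=7");
    assert.commandWorked(conn.adminCommand({configureFailPoint: "hangBeforeIndexBuildOf", mode: "off"}));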
diff --git a/jstests/noPassthrough/indexbg_drop.js b/jstests/noPassthrough/indexbg_drop.js
index 78bfb6f3e44..6ee8e47a54d 100644
--- a/jstests/noPassthrough/indexbg_drop.js
+++ b/jstests/noPassthrough/indexbg_drop.js
@@ -4,82 +4,82 @@
* @tags: [requires_replication]
*/
(function() {
- 'use strict';
-
- load('jstests/noPassthrough/libs/index_build.js');
-
- var dbname = 'dropbgindex';
- var collection = 'jstests_feh';
- var size = 100;
-
- // Setup the replica set.
- var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
- var nodes = replTest.nodeList();
- printjson(nodes);
-
- // We need an arbiter to ensure that the primary doesn't step down when we restart the
- // secondary.
- replTest.startSet();
- replTest.initiate({
- "_id": "bgIndex",
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
-
- var master = replTest.getPrimary();
- var second = replTest.getSecondary();
-
- var masterDB = master.getDB(dbname);
- var secondDB = second.getDB(dbname);
-
- var dc = {dropIndexes: collection, index: "i_1"};
-
- // Setup collections.
- masterDB.dropDatabase();
- jsTest.log("Creating test data " + size + " documents");
- Random.setRandomSeed();
- var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
- for (i = 0; i < size; ++i) {
- bulk.insert({i: Random.rand()});
- }
- assert.writeOK(bulk.execute({w: 2, wtimeout: replTest.kDefaultTimeoutMS}));
-
- assert.commandWorked(secondDB.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
-
- jsTest.log("Starting background indexing for test of: " + tojson(dc));
-
- // Add another index to be sure the drop command works.
- masterDB.getCollection(collection).ensureIndex({b: 1});
- masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
-
- // Make sure the index build has started on the secondary.
- IndexBuildTest.waitForIndexBuildToStart(secondDB);
-
- jsTest.log("Dropping indexes");
- masterDB.runCommand({dropIndexes: collection, index: "*"});
-
- jsTest.log("Waiting on replication");
- assert.commandWorked(
- secondDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
- replTest.awaitReplication();
-
- print("Index list on master:");
- masterDB.getCollection(collection).getIndexes().forEach(printjson);
-
- // Need to assert.soon because the drop only marks the index for removal
- // the removal itself is asynchronous and may take another moment before it happens.
- var i = 0;
- assert.soon(function() {
- print("Index list on secondary (run " + i + "):");
- secondDB.getCollection(collection).getIndexes().forEach(printjson);
-
- i++;
- return 1 === secondDB.getCollection(collection).getIndexes().length;
- }, "secondary did not drop index");
-
- replTest.stopSet();
+'use strict';
+
+load('jstests/noPassthrough/libs/index_build.js');
+
+var dbname = 'dropbgindex';
+var collection = 'jstests_feh';
+var size = 100;
+
+// Set up the replica set.
+var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
+var nodes = replTest.nodeList();
+printjson(nodes);
+
+// We need an arbiter to ensure that the primary doesn't step down when we restart the
+// secondary.
+replTest.startSet();
+replTest.initiate({
+ "_id": "bgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
+
+var master = replTest.getPrimary();
+var second = replTest.getSecondary();
+
+var masterDB = master.getDB(dbname);
+var secondDB = second.getDB(dbname);
+
+var dc = {dropIndexes: collection, index: "i_1"};
+
+// Set up collections.
+masterDB.dropDatabase();
+jsTest.log("Creating test data " + size + " documents");
+Random.setRandomSeed();
+var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
+for (i = 0; i < size; ++i) {
+ bulk.insert({i: Random.rand()});
+}
+assert.writeOK(bulk.execute({w: 2, wtimeout: replTest.kDefaultTimeoutMS}));
+
+assert.commandWorked(
+ secondDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
+
+jsTest.log("Starting background indexing for test of: " + tojson(dc));
+
+// Add another index to be sure the drop command works.
+masterDB.getCollection(collection).ensureIndex({b: 1});
+masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
+
+// Make sure the index build has started on the secondary.
+IndexBuildTest.waitForIndexBuildToStart(secondDB);
+
+jsTest.log("Dropping indexes");
+masterDB.runCommand({dropIndexes: collection, index: "*"});
+
+jsTest.log("Waiting on replication");
+assert.commandWorked(
+ secondDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
+replTest.awaitReplication();
+
+print("Index list on master:");
+masterDB.getCollection(collection).getIndexes().forEach(printjson);
+
+// We need to assert.soon because the drop only marks the index for removal;
+// the removal itself is asynchronous and may take another moment before it happens.
+var i = 0;
+assert.soon(function() {
+ print("Index list on secondary (run " + i + "):");
+ secondDB.getCollection(collection).getIndexes().forEach(printjson);
+
+ i++;
+ return 1 === secondDB.getCollection(collection).getIndexes().length;
+}, "secondary did not drop index");
+
+replTest.stopSet();
}());
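The drop in this test uses the wildcard form of dropIndexes, which removes every index except _id_ (the names below match the test):

    // Drop all secondary indexes in one command; the _id index always remains.
    masterDB.runCommand({dropIndexes: "jstests_feh", index: "*"});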
diff --git a/jstests/noPassthrough/indexbg_killop_apply_ops.js b/jstests/noPassthrough/indexbg_killop_apply_ops.js
index 526900232a3..6929395bc87 100644
--- a/jstests/noPassthrough/indexbg_killop_apply_ops.js
+++ b/jstests/noPassthrough/indexbg_killop_apply_ops.js
@@ -6,72 +6,71 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/noPassthrough/libs/index_build.js');
+load('jstests/noPassthrough/libs/index_build.js');
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 1}));
- IndexBuildTest.pauseIndexBuilds(primary);
+IndexBuildTest.pauseIndexBuilds(primary);
- const applyOpsCmd = {
- applyOps: [
- {
- op: 'c',
- ns: testDB.getCollection('$cmd').getFullName(),
- o: {
- createIndexes: coll.getName(),
- v: 2,
- name: 'a_1',
- key: {a: 1},
- background: true,
- },
+const applyOpsCmd = {
+ applyOps: [
+ {
+ op: 'c',
+ ns: testDB.getCollection('$cmd').getFullName(),
+ o: {
+ createIndexes: coll.getName(),
+ v: 2,
+ name: 'a_1',
+ key: {a: 1},
+ background: true,
},
- ]
- };
- const createIdx = startParallelShell(
- 'assert.commandWorked(db.adminCommand(' + tojson(applyOpsCmd) + '))', primary.port);
+ },
+ ]
+};
+const createIdx = startParallelShell(
+ 'assert.commandWorked(db.adminCommand(' + tojson(applyOpsCmd) + '))', primary.port);
- // When the index build starts, find its op id.
- const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
+// When the index build starts, find its op id.
+const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
- IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId);
+IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId);
- // Kill the index build. This should have no effect.
- assert.commandWorked(testDB.killOp(opId));
+// Kill the index build. This should have no effect.
+assert.commandWorked(testDB.killOp(opId));
- // Wait for the index build to stop.
- try {
- IndexBuildTest.waitForIndexBuildToStop(testDB);
- } finally {
- IndexBuildTest.resumeIndexBuilds(primary);
- }
+// Wait for the index build to stop.
+try {
+ IndexBuildTest.waitForIndexBuildToStop(testDB);
+} finally {
+ IndexBuildTest.resumeIndexBuilds(primary);
+}
- const exitCode = createIdx({checkExitSuccess: false});
- assert.neq(
- 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
+const exitCode = createIdx({checkExitSuccess: false});
+assert.neq(0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
- // Check that index was created on the primary despite the attempted killOp().
- IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
+// Check that index was created on the primary despite the attempted killOp().
+IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/indexbg_killop_primary.js b/jstests/noPassthrough/indexbg_killop_primary.js
index b4074408840..cc2e36eac8f 100644
--- a/jstests/noPassthrough/indexbg_killop_primary.js
+++ b/jstests/noPassthrough/indexbg_killop_primary.js
@@ -3,58 +3,57 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- load('jstests/noPassthrough/libs/index_build.js');
-
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 1}));
- IndexBuildTest.pauseIndexBuilds(primary);
+IndexBuildTest.pauseIndexBuilds(primary);
- const createIdx =
- IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true});
+const createIdx =
+ IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true});
- // When the index build starts, find its op id.
- const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
+// When the index build starts, find its op id.
+const opId = IndexBuildTest.waitForIndexBuildToStart(testDB);
- IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId);
+IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId);
- // Kill the index build.
- assert.commandWorked(testDB.killOp(opId));
+// Kill the index build.
+assert.commandWorked(testDB.killOp(opId));
- // Wait for the index build to stop.
- try {
- IndexBuildTest.waitForIndexBuildToStop(testDB);
- } finally {
- IndexBuildTest.resumeIndexBuilds(primary);
- }
+// Wait for the index build to stop.
+try {
+ IndexBuildTest.waitForIndexBuildToStop(testDB);
+} finally {
+ IndexBuildTest.resumeIndexBuilds(primary);
+}
- const exitCode = createIdx({checkExitSuccess: false});
- assert.neq(
- 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
+const exitCode = createIdx({checkExitSuccess: false});
+assert.neq(0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
- // Check that no new index has been created. This verifies that the index build was aborted
- // rather than successfully completed.
- IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
+// Check that no new index has been created. This verifies that the index build was aborted
+// rather than successfully completed.
+IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/indexbg_killop_secondary.js b/jstests/noPassthrough/indexbg_killop_secondary.js
index 272fbfa108d..261d65788de 100644
--- a/jstests/noPassthrough/indexbg_killop_secondary.js
+++ b/jstests/noPassthrough/indexbg_killop_secondary.js
@@ -3,58 +3,58 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- load('jstests/noPassthrough/libs/index_build.js');
-
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB('test');
- const coll = testDB.getCollection('test');
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
- assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 1}));
- const secondary = rst.getSecondary();
- IndexBuildTest.pauseIndexBuilds(secondary);
+const secondary = rst.getSecondary();
+IndexBuildTest.pauseIndexBuilds(secondary);
- const createIdx =
- IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true});
+const createIdx =
+ IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true});
- // When the index build starts, find its op id.
- const secondaryDB = secondary.getDB(testDB.getName());
- const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
+// When the index build starts, find its op id.
+const secondaryDB = secondary.getDB(testDB.getName());
+const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
- IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId);
+IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId);
- // Kill the index build. This should have no effect.
- assert.commandWorked(secondaryDB.killOp(opId));
+// Kill the index build. This should have no effect.
+assert.commandWorked(secondaryDB.killOp(opId));
- // Wait for the index build to stop.
- IndexBuildTest.resumeIndexBuilds(secondary);
- IndexBuildTest.waitForIndexBuildToStop(secondaryDB);
+// Wait for the index build to stop.
+IndexBuildTest.resumeIndexBuilds(secondary);
+IndexBuildTest.waitForIndexBuildToStop(secondaryDB);
- // Expect successful createIndex command invocation in parallel shell. A new index should be
- // present on the primary.
- createIdx();
- IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
+// Expect successful createIndex command invocation in parallel shell. A new index should be
+// present on the primary.
+createIdx();
+IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']);
- // Check that index was created on the secondary despite the attempted killOp().
- const secondaryColl = secondaryDB.getCollection(coll.getName());
- IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']);
+// Check that index was created on the secondary despite the attempted killOp().
+const secondaryColl = secondaryDB.getCollection(coll.getName());
+IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']);
- rst.stopSet();
+rst.stopSet();
})();
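
The test above follows the usual pause/kill/resume pattern for index builds. A condensed sketch of that pattern, reusing the IndexBuildTest helpers the test loads (primary, secondary, coll and secondaryDB stand in for the handles set up above), looks roughly like this:

    IndexBuildTest.pauseIndexBuilds(secondary);                  // hold index builds on the secondary
    const createIdx = IndexBuildTest.startIndexBuild(            // runs createIndexes in a parallel shell
        primary, coll.getFullName(), {a: 1}, {background: true});
    const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB);
    assert.commandWorked(secondaryDB.killOp(opId));              // expected to be a no-op for the build
    IndexBuildTest.resumeIndexBuilds(secondary);
    IndexBuildTest.waitForIndexBuildToStop(secondaryDB);
    createIdx();                                                 // join the parallel shell; createIndexes succeeded

The killOp() is deliberately issued while the build is paused, so the assertion that it had no effect is not racing against the build completing on its own.
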
diff --git a/jstests/noPassthrough/indexbg_shutdown.js b/jstests/noPassthrough/indexbg_shutdown.js
index 9e21209ff1f..88007a29e1a 100644
--- a/jstests/noPassthrough/indexbg_shutdown.js
+++ b/jstests/noPassthrough/indexbg_shutdown.js
@@ -7,96 +7,96 @@
*/
(function() {
- "use strict";
-
- load('jstests/libs/check_log.js');
- load('jstests/noPassthrough/libs/index_build.js');
-
- var dbname = 'bgIndexSec';
- var collection = 'bgIndexShutdown';
- var size = 100;
-
- // Set up replica set
- const replTest = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+"use strict";
+
+load('jstests/libs/check_log.js');
+load('jstests/noPassthrough/libs/index_build.js');
+
+var dbname = 'bgIndexSec';
+var collection = 'bgIndexShutdown';
+var size = 100;
+
+// Set up replica set
+const replTest = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- var second = replTest.getSecondary();
-
- var secondaryId = replTest.getNodeId(second);
-
- var masterDB = master.getDB(dbname);
- var secondDB = second.getDB(dbname);
-
- masterDB.dropDatabase();
- jsTest.log("creating test data " + size + " documents");
- const masterColl = masterDB.getCollection(collection);
- var bulk = masterColl.initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i, j: i * i});
- }
- assert.writeOK(bulk.execute());
-
- IndexBuildTest.pauseIndexBuilds(second);
-
- jsTest.log("Starting background indexing");
- // Using a write concern to wait for the background index build to finish on the primary node
- // and be started on the secondary node (but not completed, as the oplog entry is written before
- // the background index build finishes).
- const indexSpecs = [
- {key: {i: -1, j: -1}, name: 'ij1', background: true},
- {key: {i: -1, j: 1}, name: 'ij2', background: true},
- {key: {i: 1, j: -1}, name: 'ij3', background: true},
- {key: {i: 1, j: 1}, name: 'ij4', background: true}
- ];
-
- assert.commandWorked(masterDB.runCommand({
- createIndexes: collection,
- indexes: indexSpecs,
- writeConcern: {w: 2},
- }));
- const indexes = masterColl.getIndexes();
- // Number of indexes passed to createIndexes plus one for the _id index.
- assert.eq(indexSpecs.length + 1, indexes.length, tojson(indexes));
-
- // Wait for index builds to start on the secondary.
- const opId = IndexBuildTest.waitForIndexBuildToStart(secondDB);
- jsTestLog('Index builds started on secondary. Op ID of one of the builds: ' + opId);
-
- // Kill the index build. This should have no effect.
- assert.commandWorked(secondDB.killOp(opId));
-
- // There should be a message for each index we tried to create.
- checkLog.containsWithCount(
- replTest.getSecondary(),
- 'index build: starting on ' + masterColl.getFullName() + ' properties: { v: 2, key: { i:',
- indexSpecs.length);
-
- jsTest.log("Restarting secondary to retry replication");
-
- // Secondary should restart cleanly.
- replTest.restart(secondaryId, {}, /*wait=*/true);
-
- // There should again be a message for each index we tried to create, because the server
- // restarts the interrupted index build upon process startup. Note, the RAMLog is reset on
- // restart, so there should just be one set of messages in the RAMLog after restart, even though
- // the message was logged twice in total.
- checkLog.containsWithCount(
- replTest.getSecondary(),
- 'index build: starting on ' + masterColl.getFullName() + ' properties: { v: 2, key: { i:',
- indexSpecs.length);
-
- replTest.stopSet();
+ },
+ ]
+});
+const nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+var second = replTest.getSecondary();
+
+var secondaryId = replTest.getNodeId(second);
+
+var masterDB = master.getDB(dbname);
+var secondDB = second.getDB(dbname);
+
+masterDB.dropDatabase();
+jsTest.log("creating test data " + size + " documents");
+const masterColl = masterDB.getCollection(collection);
+var bulk = masterColl.initializeUnorderedBulkOp();
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i, j: i * i});
+}
+assert.writeOK(bulk.execute());
+
+IndexBuildTest.pauseIndexBuilds(second);
+
+jsTest.log("Starting background indexing");
+// Using a write concern to wait for the background index build to finish on the primary node
+// and be started on the secondary node (but not completed, as the oplog entry is written before
+// the background index build finishes).
+const indexSpecs = [
+ {key: {i: -1, j: -1}, name: 'ij1', background: true},
+ {key: {i: -1, j: 1}, name: 'ij2', background: true},
+ {key: {i: 1, j: -1}, name: 'ij3', background: true},
+ {key: {i: 1, j: 1}, name: 'ij4', background: true}
+];
+
+assert.commandWorked(masterDB.runCommand({
+ createIndexes: collection,
+ indexes: indexSpecs,
+ writeConcern: {w: 2},
+}));
+const indexes = masterColl.getIndexes();
+// Number of indexes passed to createIndexes plus one for the _id index.
+assert.eq(indexSpecs.length + 1, indexes.length, tojson(indexes));
+
+// Wait for index builds to start on the secondary.
+const opId = IndexBuildTest.waitForIndexBuildToStart(secondDB);
+jsTestLog('Index builds started on secondary. Op ID of one of the builds: ' + opId);
+
+// Kill the index build. This should have no effect.
+assert.commandWorked(secondDB.killOp(opId));
+
+// There should be a message for each index we tried to create.
+checkLog.containsWithCount(
+ replTest.getSecondary(),
+ 'index build: starting on ' + masterColl.getFullName() + ' properties: { v: 2, key: { i:',
+ indexSpecs.length);
+
+jsTest.log("Restarting secondary to retry replication");
+
+// Secondary should restart cleanly.
+replTest.restart(secondaryId, {}, /*wait=*/true);
+
+// There should again be a message for each index we tried to create, because the server
+// restarts the interrupted index build upon process startup. Note, the RAMLog is reset on
+// restart, so there should just be one set of messages in the RAMLog after restart, even though
+// the message was logged twice in total.
+checkLog.containsWithCount(
+ replTest.getSecondary(),
+ 'index build: starting on ' + masterColl.getFullName() + ' properties: { v: 2, key: { i:',
+ indexSpecs.length);
+
+replTest.stopSet();
}());
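
Two primitives carry most of the weight in the shutdown test above: a createIndexes issued with writeConcern {w: 2}, which only returns once the secondary has at least started the builds, and checkLog.containsWithCount, which counts the "index build: starting" lines. A minimal sketch, where primaryDB, collName and secondaryNode are placeholders for the handles created above:

    assert.commandWorked(primaryDB.runCommand({
        createIndexes: collName,
        indexes: [{key: {i: 1}, name: 'i_1', background: true}],
        writeConcern: {w: 2},  // returns once the secondary has started (not finished) the build
    }));
    checkLog.containsWithCount(secondaryNode, 'index build: starting on', 1);
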
diff --git a/jstests/noPassthrough/initial_sync_wt_cache_full.js b/jstests/noPassthrough/initial_sync_wt_cache_full.js
index c1a6638ea50..90d19a172ab 100644
--- a/jstests/noPassthrough/initial_sync_wt_cache_full.js
+++ b/jstests/noPassthrough/initial_sync_wt_cache_full.js
@@ -3,69 +3,68 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
- load('jstests/libs/check_log.js');
+'use strict';
+load('jstests/libs/check_log.js');
- const rst = new ReplSetTest({
- nodes: [
- {
- slowms: 30000, // Don't log slow operations on primary.
+const rst = new ReplSetTest({
+ nodes: [
+ {
+ slowms: 30000, // Don't log slow operations on primary.
+ },
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- // Constrain the storage engine cache size to make it easier to fill it up with
- // unflushed modifications.
- wiredTigerCacheSizeGB: 1,
- },
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ // Constrain the storage engine cache size to make it easier to fill it up with
+ // unflushed modifications.
+ wiredTigerCacheSizeGB: 1,
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('t');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('t');
- const numDocs = 2;
- const minDocSizeMB = 10;
+const numDocs = 2;
+const minDocSizeMB = 10;
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
- coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
- {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
+for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
+ {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+}
+assert.eq(numDocs, coll.find().itcount());
- const secondary = rst.restart(1, {
- startClean: true,
- setParameter:
- 'failpoint.initialSyncHangBeforeCopyingDatabases=' + tojson({mode: 'alwaysOn'})
- });
+const secondary = rst.restart(1, {
+ startClean: true,
+ setParameter: 'failpoint.initialSyncHangBeforeCopyingDatabases=' + tojson({mode: 'alwaysOn'})
+});
- const batchOpsLimit =
- assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
- .replBatchLimitOperations;
- jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' +
- batchOpsLimit + ' operations per batch.');
+const batchOpsLimit =
+ assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
+ .replBatchLimitOperations;
+jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' + batchOpsLimit +
+ ' operations per batch.');
- const numUpdates = 400;
- jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
- for (let i = 0; i < numDocs; ++i) {
- for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
- }
+const numUpdates = 400;
+jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+for (let i = 0; i < numDocs; ++i) {
+ for (let j = 0; j < numUpdates; ++j) {
+ assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
}
+}
- jsTestLog('Applying updates on secondary ' + secondary.host);
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
- rst.awaitReplication();
+jsTestLog('Applying updates on secondary ' + secondary.host);
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+rst.awaitReplication();
- rst.stopSet();
+rst.stopSet();
})();
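
The initial-sync test above drives its failpoint two ways: at startup through the failpoint.* setParameter form, and at runtime through the configureFailPoint command. A stripped-down sketch of that bracket, with names mirroring the test:

    const secondary = rst.restart(1, {
        startClean: true,
        setParameter: 'failpoint.initialSyncHangBeforeCopyingDatabases=' + tojson({mode: 'alwaysOn'})
    });
    checkLog.contains(secondary, 'initialSyncHangBeforeCopyingDatabases fail point enabled');
    // ... queue up writes for the secondary to apply after initial sync ...
    assert.commandWorked(secondary.adminCommand(
        {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
    rst.awaitReplication();
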
diff --git a/jstests/noPassthrough/inmem_config_str.js b/jstests/noPassthrough/inmem_config_str.js
index 8e330f57b75..ecc34ab0634 100644
--- a/jstests/noPassthrough/inmem_config_str.js
+++ b/jstests/noPassthrough/inmem_config_str.js
@@ -1,17 +1,17 @@
// SERVER-28179 Test the startup of in-memory storage engine using --inMemoryEngineConfigString
(function() {
- 'use strict';
+'use strict';
- if (jsTest.options().storageEngine !== "inMemory") {
- jsTestLog("Skipping test because storageEngine is not inMemory");
- return;
- }
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTestLog("Skipping test because storageEngine is not inMemory");
+ return;
+}
- var mongod = MongoRunner.runMongod({
- storageEngine: 'inMemory',
- inMemoryEngineConfigString: 'eviction=(threads_min=1)',
- });
-    assert.neq(null, mongod, "mongod failed to start up with --inMemoryEngineConfigString");
+var mongod = MongoRunner.runMongod({
+ storageEngine: 'inMemory',
+ inMemoryEngineConfigString: 'eviction=(threads_min=1)',
+});
+assert.neq(null, mongod, "mongod failed to start up with --inMemoryEngineConfigString");
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
}());
diff --git a/jstests/noPassthrough/inmem_full.js b/jstests/noPassthrough/inmem_full.js
index a73a7f0ad69..84e85f31955 100644
--- a/jstests/noPassthrough/inmem_full.js
+++ b/jstests/noPassthrough/inmem_full.js
@@ -1,87 +1,87 @@
// SERVER-22599 Test behavior of in-memory storage engine with full cache.
(function() {
- 'use strict';
+'use strict';
- if (jsTest.options().storageEngine !== "inMemory") {
- jsTestLog("Skipping test because storageEngine is not inMemory");
- return;
- }
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTestLog("Skipping test because storageEngine is not inMemory");
+ return;
+}
- Random.setRandomSeed();
+Random.setRandomSeed();
- // Return array of approximately 1kB worth of random numbers.
- function randomArray() {
- var arr = [];
- for (var j = 0; j < 85; j++)
- arr[j] = Random.rand();
- return arr;
- }
+// Return array of approximately 1kB worth of random numbers.
+function randomArray() {
+ var arr = [];
+ for (var j = 0; j < 85; j++)
+ arr[j] = Random.rand();
+ return arr;
+}
- // Return a document of approximately 10kB in size with arrays of random numbers.
- function randomDoc() {
- var doc = {};
- for (var c of "abcdefghij")
- doc[c] = randomArray();
- return doc;
- }
+// Return a document of approximately 10kB in size with arrays of random numbers.
+function randomDoc() {
+ var doc = {};
+ for (var c of "abcdefghij")
+ doc[c] = randomArray();
+ return doc;
+}
- // Return an array with random documents totalling about 1Mb.
- function randomBatch(batchSize) {
- var batch = [];
- for (var j = 0; j < batchSize; j++)
- batch[j] = randomDoc();
- return batch;
- }
+// Return an array with random documents totalling about 1Mb.
+function randomBatch(batchSize) {
+ var batch = [];
+ for (var j = 0; j < batchSize; j++)
+ batch[j] = randomDoc();
+ return batch;
+}
- const cacheMB = 128;
- const cacheKB = 1024 * cacheMB;
- const docSizeKB = Object.bsonsize(randomDoc()) / 1024;
- const batchSize = 100;
- const batch = randomBatch(batchSize);
+const cacheMB = 128;
+const cacheKB = 1024 * cacheMB;
+const docSizeKB = Object.bsonsize(randomDoc()) / 1024;
+const batchSize = 100;
+const batch = randomBatch(batchSize);
- var mongod = MongoRunner.runMongod({
- storageEngine: 'inMemory',
- inMemoryEngineConfigString: 'cache_size=' + cacheMB + "M,",
- });
-    assert.neq(null, mongod, "mongod failed to start up with --inMemoryEngineConfigString");
- var db = mongod.getDB("test");
- var t = db.large;
+var mongod = MongoRunner.runMongod({
+ storageEngine: 'inMemory',
+ inMemoryEngineConfigString: 'cache_size=' + cacheMB + "M,",
+});
+assert.neq(null, mongod, "mongod failed to start up with --inMemoryEngineConfigString");
+var db = mongod.getDB("test");
+var t = db.large;
- // Insert documents until full.
- var res;
- var count = 0;
- for (var j = 0; j < 1000; j++) {
- res = t.insert(batch);
- assert.gte(res.nInserted, 0, tojson(res));
- count += res.nInserted;
- if (res.hasErrors())
- break;
- assert.eq(res.nInserted, batchSize, tojson(res));
- print("Inserted " + count + " documents");
- }
- assert.writeError(res, "didn't get ExceededMemoryLimit but should have");
+// Insert documents until full.
+var res;
+var count = 0;
+for (var j = 0; j < 1000; j++) {
+ res = t.insert(batch);
+ assert.gte(res.nInserted, 0, tojson(res));
+ count += res.nInserted;
+ if (res.hasErrors())
+ break;
+ assert.eq(res.nInserted, batchSize, tojson(res));
print("Inserted " + count + " documents");
+}
+assert.writeError(res, "didn't get ExceededMemoryLimit but should have");
+print("Inserted " + count + " documents");
- // Should have encountered exactly one memory full error.
- assert.eq(res.getWriteErrorCount(), 1, tojson(res));
- assert.eq(res.getWriteErrorAt(0).code, ErrorCodes.ExceededMemoryLimit, tojson(res));
+// Should have encountered exactly one memory full error.
+assert.eq(res.getWriteErrorCount(), 1, tojson(res));
+assert.eq(res.getWriteErrorAt(0).code, ErrorCodes.ExceededMemoryLimit, tojson(res));
- // Should encounter memory full at between 75% and 150% of total capacity.
- assert.gt(count * docSizeKB, cacheKB * 0.75, "inserted data size is at least 75% of capacity");
- assert.lt(count * docSizeKB, cacheKB * 1.50, "inserted data size is at most 150% of capacity");
+// Should encounter memory full at between 75% and 150% of total capacity.
+assert.gt(count * docSizeKB, cacheKB * 0.75, "inserted data size is at least 75% of capacity");
+assert.lt(count * docSizeKB, cacheKB * 1.50, "inserted data size is at most 150% of capacity");
- // Indexes are sufficiently large that it should be impossible to add a new one.
- assert.commandFailedWithCode(t.createIndex({a: 1}), ErrorCodes.ExceededMemoryLimit);
+// Indexes are sufficiently large that it should be impossible to add a new one.
+assert.commandFailedWithCode(t.createIndex({a: 1}), ErrorCodes.ExceededMemoryLimit);
- // An aggregate copying all 'a' and 'b' fields should run out of memory.
- // Can't test the specific error code, because it depends on whether the collection
- // creation already fails, or just the writing. Agg wraps the original error code.
- assert.commandFailed(
- t.runCommand("aggregate", {pipeline: [{$project: {a: 1, b: 1}}, {$out: "test.out"}]}));
+// An aggregate copying all 'a' and 'b' fields should run out of memory.
+// Can't test the specific error code, because it depends on whether the collection
+// creation already fails, or just the writing. Agg wraps the original error code.
+assert.commandFailed(
+ t.runCommand("aggregate", {pipeline: [{$project: {a: 1, b: 1}}, {$out: "test.out"}]}));
- // Should still be able to query.
- assert.eq(t.find({}).itcount(), count, "cannot find expected number of documents");
- assert.eq(t.aggregate([{$group: {_id: null, count: {$sum: 1}}}]).next().count,
- count,
- "cannot aggregate expected number of documents");
+// Should still be able to query.
+assert.eq(t.find({}).itcount(), count, "cannot find expected number of documents");
+assert.eq(t.aggregate([{$group: {_id: null, count: {$sum: 1}}}]).next().count,
+ count,
+ "cannot aggregate expected number of documents");
}());
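
For the in-memory cache-full test above, the key detail is that a bulk insert can partially succeed before hitting ExceededMemoryLimit, so the loop checks hasErrors() rather than asserting that every batch lands. The error handling, reduced to its essentials (t, batch and count as defined above):

    const res = t.insert(batch);
    count += res.nInserted;                   // some documents may land before the error
    if (res.hasErrors()) {
        assert.eq(res.getWriteErrorCount(), 1, tojson(res));
        assert.eq(res.getWriteErrorAt(0).code, ErrorCodes.ExceededMemoryLimit, tojson(res));
    }
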
diff --git a/jstests/noPassthrough/internal_validate_features_as_master.js b/jstests/noPassthrough/internal_validate_features_as_master.js
index d60ee184b7c..710174cf8b3 100644
--- a/jstests/noPassthrough/internal_validate_features_as_master.js
+++ b/jstests/noPassthrough/internal_validate_features_as_master.js
@@ -1,32 +1,30 @@
// Tests the internalValidateFeaturesAsMaster server parameter.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- // internalValidateFeaturesAsMaster can be set via startup parameter.
- let conn = MongoRunner.runMongod({setParameter: "internalValidateFeaturesAsMaster=1"});
- assert.neq(null, conn, "mongod was unable to start up");
- let res = conn.adminCommand({getParameter: 1, internalValidateFeaturesAsMaster: 1});
- assert.commandWorked(res);
- assert.eq(res.internalValidateFeaturesAsMaster, true);
- MongoRunner.stopMongod(conn);
+// internalValidateFeaturesAsMaster can be set via startup parameter.
+let conn = MongoRunner.runMongod({setParameter: "internalValidateFeaturesAsMaster=1"});
+assert.neq(null, conn, "mongod was unable to start up");
+let res = conn.adminCommand({getParameter: 1, internalValidateFeaturesAsMaster: 1});
+assert.commandWorked(res);
+assert.eq(res.internalValidateFeaturesAsMaster, true);
+MongoRunner.stopMongod(conn);
- // internalValidateFeaturesAsMaster cannot be set with --replSet.
- conn = MongoRunner.runMongod(
- {replSet: "replSetName", setParameter: "internalValidateFeaturesAsMaster=0"});
- assert.eq(null, conn, "mongod was unexpectedly able to start up");
+// internalValidateFeaturesAsMaster cannot be set with --replSet.
+conn = MongoRunner.runMongod(
+ {replSet: "replSetName", setParameter: "internalValidateFeaturesAsMaster=0"});
+assert.eq(null, conn, "mongod was unexpectedly able to start up");
- conn = MongoRunner.runMongod(
- {replSet: "replSetName", setParameter: "internalValidateFeaturesAsMaster=1"});
- assert.eq(null, conn, "mongod was unexpectedly able to start up");
+conn = MongoRunner.runMongod(
+ {replSet: "replSetName", setParameter: "internalValidateFeaturesAsMaster=1"});
+assert.eq(null, conn, "mongod was unexpectedly able to start up");
- // internalValidateFeaturesAsMaster cannot be set via runtime parameter.
- conn = MongoRunner.runMongod({});
- assert.commandFailed(
- conn.adminCommand({setParameter: 1, internalValidateFeaturesAsMaster: true}));
- assert.commandFailed(
- conn.adminCommand({setParameter: 1, internalValidateFeaturesAsMaster: false}));
- MongoRunner.stopMongod(conn);
+// internalValidateFeaturesAsMaster cannot be set via runtime parameter.
+conn = MongoRunner.runMongod({});
+assert.commandFailed(conn.adminCommand({setParameter: 1, internalValidateFeaturesAsMaster: true}));
+assert.commandFailed(conn.adminCommand({setParameter: 1, internalValidateFeaturesAsMaster: false}));
+MongoRunner.stopMongod(conn);
}());
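
The parameter exercised above is startup-only: it can be supplied via --setParameter when the process starts, but the setParameter command rejects it at runtime. The pattern, in brief:

    let conn = MongoRunner.runMongod({setParameter: "internalValidateFeaturesAsMaster=1"});
    assert.eq(true,
              conn.adminCommand({getParameter: 1, internalValidateFeaturesAsMaster: 1})
                  .internalValidateFeaturesAsMaster);
    // Runtime changes are refused for startup-only parameters.
    assert.commandFailed(conn.adminCommand({setParameter: 1, internalValidateFeaturesAsMaster: false}));
    MongoRunner.stopMongod(conn);
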
diff --git a/jstests/noPassthrough/jsHeapLimit.js b/jstests/noPassthrough/jsHeapLimit.js
index 512a79332ad..0089955c4a5 100644
--- a/jstests/noPassthrough/jsHeapLimit.js
+++ b/jstests/noPassthrough/jsHeapLimit.js
@@ -1,26 +1,24 @@
(function() {
- "use strict";
+"use strict";
- const options = {setParameter: "jsHeapLimitMB=1000"};
- const conn = MongoRunner.runMongod(options);
+const options = {
+ setParameter: "jsHeapLimitMB=1000"
+};
+const conn = MongoRunner.runMongod(options);
- // verify JSHeapLimitMB set from the shell
- var assertLimit = function() {
- assert.eq(999, getJSHeapLimitMB());
- };
- var exitCode = runMongoProgram("mongo",
- conn.host,
- "--jsHeapLimitMB",
- 999,
- "--eval",
- "(" + assertLimit.toString() + ")();");
- assert.eq(0, exitCode);
+// verify JSHeapLimitMB set from the shell
+var assertLimit = function() {
+ assert.eq(999, getJSHeapLimitMB());
+};
+var exitCode = runMongoProgram(
+ "mongo", conn.host, "--jsHeapLimitMB", 999, "--eval", "(" + assertLimit.toString() + ")();");
+assert.eq(0, exitCode);
- // verify the JSHeapLimitMB set from Mongod
- const db = conn.getDB('test');
- const res = db.adminCommand({getParameter: 1, jsHeapLimitMB: 1});
- assert.commandWorked(res);
- assert.eq(1000, res.jsHeapLimitMB);
+// verify the JSHeapLimitMB set from Mongod
+const db = conn.getDB('test');
+const res = db.adminCommand({getParameter: 1, jsHeapLimitMB: 1});
+assert.commandWorked(res);
+assert.eq(1000, res.jsHeapLimitMB);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
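
The heap-limit test above distinguishes the shell-side flag (--jsHeapLimitMB, readable via getJSHeapLimitMB()) from the server parameter of the same name. Both checks, compressed into a few lines (conn as started above):

    const exitCode = runMongoProgram(
        "mongo", conn.host, "--jsHeapLimitMB", 999, "--eval", "assert.eq(999, getJSHeapLimitMB());");
    assert.eq(0, exitCode);
    assert.eq(1000,
              conn.getDB("test").adminCommand({getParameter: 1, jsHeapLimitMB: 1}).jsHeapLimitMB);
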
diff --git a/jstests/noPassthrough/js_exceptions.js b/jstests/noPassthrough/js_exceptions.js
index fe7cb4aa48f..27c7f6c2a41 100644
--- a/jstests/noPassthrough/js_exceptions.js
+++ b/jstests/noPassthrough/js_exceptions.js
@@ -3,120 +3,120 @@
*
*/
(function() {
- 'use strict';
- let tests = [
- {
- callback: function() {
- UUID("asdf");
- },
- match: "Error: Invalid UUID string: asdf :",
- stack: true,
+'use strict';
+let tests = [
+ {
+ callback: function() {
+ UUID("asdf");
},
- {
- callback: function() {
- throw {};
- },
- match: "uncaught exception: \\\[object Object\\\] :",
- stack: undefined,
+ match: "Error: Invalid UUID string: asdf :",
+ stack: true,
+ },
+ {
+ callback: function() {
+ throw {};
},
- {
- callback: function() {
- throw "asdf";
- },
- match: "uncaught exception: asdf",
- stack: false,
+ match: "uncaught exception: \\\[object Object\\\] :",
+ stack: undefined,
+ },
+ {
+ callback: function() {
+ throw "asdf";
},
- {
- callback: function() {
- throw 1;
- },
- match: "uncaught exception: 1",
- stack: false,
+ match: "uncaught exception: asdf",
+ stack: false,
+ },
+ {
+ callback: function() {
+ throw 1;
},
- {
- callback: function() {
- foo.bar();
- },
- match: "uncaught exception: ReferenceError: foo is not defined :",
- stack: true,
+ match: "uncaught exception: 1",
+ stack: false,
+ },
+ {
+ callback: function() {
+ foo.bar();
},
- {
- callback: function() {
- throw function() {};
- },
- match: "function\\\(\\\) {} :",
- stack: undefined,
+ match: "uncaught exception: ReferenceError: foo is not defined :",
+ stack: true,
+ },
+ {
+ callback: function() {
+ throw function() {};
},
- {
- callback: function() {
- try {
- UUID("asdf");
- } catch (e) {
- throw(e.constructor());
- }
- },
- match: "uncaught exception: Error :",
- stack: true,
+ match: "function\\\(\\\) {} :",
+ stack: undefined,
+ },
+ {
+ callback: function() {
+ try {
+ UUID("asdf");
+ } catch (e) {
+ throw (e.constructor());
+ }
},
- {
- callback: function() {
- try {
- UUID("asdf");
- } catch (e) {
- throw(e.prototype);
- }
- },
- match: "uncaught exception: undefined",
- stack: false,
+ match: "uncaught exception: Error :",
+ stack: true,
+ },
+ {
+ callback: function() {
+ try {
+ UUID("asdf");
+ } catch (e) {
+ throw (e.prototype);
+ }
},
- ];
- function recurser(depth, limit, callback) {
- if (++depth >= limit) {
- callback();
- } else {
- recurser(depth, limit, callback);
- }
- }
- function assertMatch(m, l) {
- assert(m.test(l), m + " didn't match \"" + l + "\"");
+ match: "uncaught exception: undefined",
+ stack: false,
+ },
+];
+function recurser(depth, limit, callback) {
+ if (++depth >= limit) {
+ callback();
+ } else {
+ recurser(depth, limit, callback);
}
- tests.forEach(function(t) {
- let code = tojson(recurser);
- [1, 2, 10].forEach(function(depth) {
- clearRawMongoProgramOutput();
- assert.throws(startParallelShell(
- code + ";\nrecurser(0," + depth + "," + tojson(t.callback) + ");", false, true));
- let output = rawMongoProgramOutput();
- let lines = output.split(/\s*\n/);
- let matchShellExp = false;
- while (lines.length > 0 & matchShellExp !== true) {
- let line = lines.shift();
- if (line.match(/MongoDB shell version/)) {
- matchShellExp = true;
- }
+}
+function assertMatch(m, l) {
+ assert(m.test(l), m + " didn't match \"" + l + "\"");
+}
+tests.forEach(function(t) {
+ let code = tojson(recurser);
+ [1, 2, 10].forEach(function(depth) {
+ clearRawMongoProgramOutput();
+ assert.throws(startParallelShell(
+ code + ";\nrecurser(0," + depth + "," + tojson(t.callback) + ");", false, true));
+ let output = rawMongoProgramOutput();
+ let lines = output.split(/\s*\n/);
+ let matchShellExp = false;
+ while (lines.length > 0 & matchShellExp !== true) {
+ let line = lines.shift();
+ if (line.match(/MongoDB shell version/)) {
+ matchShellExp = true;
}
- assert(matchShellExp);
- assertMatch(/^\s*$/, lines.pop());
- assertMatch(/exiting with code/, lines.pop());
- assertMatch(new RegExp("\\\[js\\\] " + t.match + "$"), lines.shift());
+ }
+ assert(matchShellExp);
+ assertMatch(/^\s*$/, lines.pop());
+ assertMatch(/exiting with code/, lines.pop());
+ assertMatch(new RegExp("\\\[js\\\] " + t.match + "$"), lines.shift());
- if (t.stack == true) {
- assert.eq(lines.length,
- depth + 2); // plus one for the shell and one for the callback
- lines.forEach(function(l) {
- assertMatch(/\@\(shell eval\):\d+:\d+/, l);
- });
- lines.pop();
- lines.shift();
- lines.forEach(function(l) {
- assertMatch(/recurser\@/, l);
- });
- } else if (t.stack == false) {
- assert.eq(lines.length, 0);
- } else if (t.stack == undefined) {
- assert.eq(lines.length, 1);
- assertMatch(/undefined/, lines.pop());
- }
- });
+ if (t.stack == true) {
+ assert.eq(lines.length,
+ depth + 2); // plus one for the shell and one for the callback
+ lines.forEach(function(l) {
+ assertMatch(/\@\(shell eval\):\d+:\d+/, l);
+ });
+ lines.pop();
+ lines.shift();
+ lines.forEach(function(l) {
+ assertMatch(/recurser\@/, l);
+ });
+ } else if (t.stack == false) {
+ assert.eq(lines.length, 0);
+ } else if (t.stack == undefined) {
+ assert.eq(lines.length, 1);
+ assertMatch(/undefined/, lines.pop());
+ }
});
+});
})();
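
The exception-formatting test above leans on three shell primitives to capture a child shell's output. A minimal sketch of that capture loop, with an arbitrary thrown value standing in for the test's callbacks:

    clearRawMongoProgramOutput();
    assert.throws(startParallelShell("throw 1;", false, true));  // join() throws on non-zero exit
    const lines = rawMongoProgramOutput().split(/\s*\n/);
    assert(lines.some(function(l) {
        return /uncaught exception: 1/.test(l);
    }));
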
diff --git a/jstests/noPassthrough/js_protection.js b/jstests/noPassthrough/js_protection.js
index eda42395cd9..7783488a663 100644
--- a/jstests/noPassthrough/js_protection.js
+++ b/jstests/noPassthrough/js_protection.js
@@ -11,85 +11,84 @@
*/
(function() {
- "use strict";
-
- var testServer = MongoRunner.runMongod({setParameter: "javascriptProtection=true"});
- assert.neq(
- null, testServer, "failed to start mongod with --setParameter=javascriptProtection=true");
-
- var db = testServer.getDB("test");
- var t = db.js_protection;
-
- function assertMongoClientCorrect() {
- var functionToEval = function() {
- var doc = db.js_protection.findOne({_id: 0});
- assert.neq(null, doc);
- assert(doc.hasOwnProperty("myFunc"));
- assert.neq("function",
- typeof doc.myFunc,
- "value of BSON type Code shouldn't have been eval()ed automatically");
-
- assert.eq("undefined", typeof addOne, "addOne function has already been defined");
- db.loadServerScripts();
- assert.neq(
- "undefined", typeof addOne, "addOne function should have been eval()ed locally");
- assert.eq(5, addOne(4));
- };
-
- var exitCode = runMongoProgram("mongo",
- "--port",
- testServer.port,
- "--enableJavaScriptProtection",
- "--eval",
- "(" + functionToEval.toString() + ")();");
- assert.eq(0, exitCode);
- }
+"use strict";
- function assertNoStoredWhere() {
- t.insertOne({name: "testdoc", val: 0, y: 0});
+var testServer = MongoRunner.runMongod({setParameter: "javascriptProtection=true"});
+assert.neq(
+ null, testServer, "failed to start mongod with --setParameter=javascriptProtection=true");
- var res = t.update({$where: "addOne(this.val) === 1"}, {$set: {y: 100}}, false, true);
- assert.writeError(res);
+var db = testServer.getDB("test");
+var t = db.js_protection;
- var doc = t.findOne({name: "testdoc"});
- assert.neq(null, doc);
- assert.eq(0, doc.y, tojson(doc));
-
- res = t.update({
- $where: function() {
- return this.val === 0;
- }
- },
- {$set: {y: 100}},
- false,
- true);
- assert.writeOK(res);
-
- doc = t.findOne({name: "testdoc"});
+function assertMongoClientCorrect() {
+ var functionToEval = function() {
+ var doc = db.js_protection.findOne({_id: 0});
assert.neq(null, doc);
- assert.eq(100, doc.y, tojson(doc));
- }
+ assert(doc.hasOwnProperty("myFunc"));
+ assert.neq("function",
+ typeof doc.myFunc,
+ "value of BSON type Code shouldn't have been eval()ed automatically");
+
+ assert.eq("undefined", typeof addOne, "addOne function has already been defined");
+ db.loadServerScripts();
+ assert.neq("undefined", typeof addOne, "addOne function should have been eval()ed locally");
+ assert.eq(5, addOne(4));
+ };
+
+ var exitCode = runMongoProgram("mongo",
+ "--port",
+ testServer.port,
+ "--enableJavaScriptProtection",
+ "--eval",
+ "(" + functionToEval.toString() + ")();");
+ assert.eq(0, exitCode);
+}
+
+function assertNoStoredWhere() {
+ t.insertOne({name: "testdoc", val: 0, y: 0});
+
+ var res = t.update({$where: "addOne(this.val) === 1"}, {$set: {y: 100}}, false, true);
+ assert.writeError(res);
+
+ var doc = t.findOne({name: "testdoc"});
+ assert.neq(null, doc);
+ assert.eq(0, doc.y, tojson(doc));
+
+ res = t.update({
+ $where: function() {
+ return this.val === 0;
+ }
+ },
+ {$set: {y: 100}},
+ false,
+ true);
+ assert.writeOK(res);
- /**
- * ACTUAL TEST
- */
+ doc = t.findOne({name: "testdoc"});
+ assert.neq(null, doc);
+ assert.eq(100, doc.y, tojson(doc));
+}
- db.system.js.insertOne({
- _id: "addOne",
- value: function(x) {
- return x + 1;
- }
- });
+/**
+ * ACTUAL TEST
+ */
- t.insertOne({
- _id: 0,
- myFunc: function() {
- return "testval";
- }
- });
+db.system.js.insertOne({
+ _id: "addOne",
+ value: function(x) {
+ return x + 1;
+ }
+});
+
+t.insertOne({
+ _id: 0,
+ myFunc: function() {
+ return "testval";
+ }
+});
- assertMongoClientCorrect();
- assertNoStoredWhere();
+assertMongoClientCorrect();
+assertNoStoredWhere();
- MongoRunner.stopMongod(testServer);
+MongoRunner.stopMongod(testServer);
})();
diff --git a/jstests/noPassthrough/js_protection_roundtrip.js b/jstests/noPassthrough/js_protection_roundtrip.js
index 59a1623419b..5c0c0b4da10 100644
--- a/jstests/noPassthrough/js_protection_roundtrip.js
+++ b/jstests/noPassthrough/js_protection_roundtrip.js
@@ -8,50 +8,50 @@
* 'CodeWScope'.
*/
(function() {
- "use strict";
+"use strict";
- var testServer = MongoRunner.runMongod();
- assert.neq(null, testServer, "failed to start mongod");
- var db = testServer.getDB("test");
- var t = db.js_protection_roundtrip;
+var testServer = MongoRunner.runMongod();
+assert.neq(null, testServer, "failed to start mongod");
+var db = testServer.getDB("test");
+var t = db.js_protection_roundtrip;
- function withoutJavaScriptProtection() {
- var doc = db.js_protection_roundtrip.findOne({_id: 0});
- assert.neq(doc, null);
- assert.eq(typeof doc.myFunc, "function", "myFunc should have been presented as a function");
- assert.eq(doc.myFunc(), "yes");
- }
+function withoutJavaScriptProtection() {
+ var doc = db.js_protection_roundtrip.findOne({_id: 0});
+ assert.neq(doc, null);
+ assert.eq(typeof doc.myFunc, "function", "myFunc should have been presented as a function");
+ assert.eq(doc.myFunc(), "yes");
+}
- function withJavaScriptProtection() {
- var doc = db.js_protection_roundtrip.findOne({_id: 0});
- assert.neq(doc, null);
- assert(doc.myFunc instanceof Code, "myFunc should have been a Code object");
- doc.myFunc = eval("(" + doc.myFunc.code + ")");
- assert.eq(doc.myFunc(), "yes");
- }
+function withJavaScriptProtection() {
+ var doc = db.js_protection_roundtrip.findOne({_id: 0});
+ assert.neq(doc, null);
+ assert(doc.myFunc instanceof Code, "myFunc should have been a Code object");
+ doc.myFunc = eval("(" + doc.myFunc.code + ")");
+ assert.eq(doc.myFunc(), "yes");
+}
- function testFunctionUnmarshall(jsProtection, evalFunc) {
- var evalString = "(" + tojson(evalFunc) + ")();";
- var protectionFlag =
- jsProtection ? "--enableJavaScriptProtection" : "--disableJavaScriptProtection";
- var exitCode = runMongoProgram(
- "mongo", "--port", testServer.port, protectionFlag, "--eval", evalString);
- assert.eq(exitCode, 0);
- }
+function testFunctionUnmarshall(jsProtection, evalFunc) {
+ var evalString = "(" + tojson(evalFunc) + ")();";
+ var protectionFlag =
+ jsProtection ? "--enableJavaScriptProtection" : "--disableJavaScriptProtection";
+ var exitCode =
+ runMongoProgram("mongo", "--port", testServer.port, protectionFlag, "--eval", evalString);
+ assert.eq(exitCode, 0);
+}
- /**
- * ACTUAL TEST
- */
- var result = t.insert({
- _id: 0,
- myFunc: function() {
- return "yes";
- }
- });
- assert.writeOK(result);
+/**
+ * ACTUAL TEST
+ */
+var result = t.insert({
+ _id: 0,
+ myFunc: function() {
+ return "yes";
+ }
+});
+assert.writeOK(result);
- testFunctionUnmarshall(true, withJavaScriptProtection);
- testFunctionUnmarshall(false, withoutJavaScriptProtection);
+testFunctionUnmarshall(true, withJavaScriptProtection);
+testFunctionUnmarshall(false, withoutJavaScriptProtection);
- MongoRunner.stopMongod(testServer);
+MongoRunner.stopMongod(testServer);
})();
diff --git a/jstests/noPassthrough/json_schema_ignore_unknown_keywords.js b/jstests/noPassthrough/json_schema_ignore_unknown_keywords.js
index f16a757c3f5..5ed0be8f101 100644
--- a/jstests/noPassthrough/json_schema_ignore_unknown_keywords.js
+++ b/jstests/noPassthrough/json_schema_ignore_unknown_keywords.js
@@ -3,57 +3,56 @@
* ignores unknown keywords within $jsonSchema.
*/
(function() {
- "use strict";
-
- load("jstests/libs/assert_schema_match.js");
-
- const options = {setParameter: "internalQueryIgnoreUnknownJSONSchemaKeywords=1"};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(options));
-
- const testDB = conn.getDB("test");
- const coll = testDB.getCollection("jstests_json_schema_ignore_unsupported");
-
- assertSchemaMatch(coll, {my_keyword: "ignored", minProperties: 2}, {_id: 0}, false);
- assertSchemaMatch(coll, {my_keyword: "ignored", minProperties: 2}, {_id: 0, a: 1}, true);
- assertSchemaMatch(
- coll, {properties: {a: {my_keyword: "ignored", minProperties: 1}}}, {a: {b: 1}}, true);
-
- // Test that the same query knob does not change the behavior for unsupported keywords.
- {
- let res =
- coll.runCommand({find: coll.getName(), query: {$jsonSchema: {default: {_id: 0}}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({
- find: coll.getName(),
- query: {$jsonSchema: {definitions: {numberField: {type: "number"}}}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {format: "email"}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res =
- coll.runCommand({find: coll.getName(), query: {$jsonSchema: {id: "someschema.json"}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({
- find: coll.getName(),
- query: {$jsonSchema: {properties: {a: {$ref: "#/definitions/positiveInt"}}}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand(
- {find: coll.getName(), query: {$jsonSchema: {$schema: "hyper-schema"}}});
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
-
- res = coll.runCommand({
- find: coll.getName(),
- query: {$jsonSchema: {$schema: "http://json-schema.org/draft-04/schema#"}}
- });
- assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
- }
-
- MongoRunner.stopMongod(conn);
+"use strict";
+
+load("jstests/libs/assert_schema_match.js");
+
+const options = {
+ setParameter: "internalQueryIgnoreUnknownJSONSchemaKeywords=1"
+};
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(options));
+
+const testDB = conn.getDB("test");
+const coll = testDB.getCollection("jstests_json_schema_ignore_unsupported");
+
+assertSchemaMatch(coll, {my_keyword: "ignored", minProperties: 2}, {_id: 0}, false);
+assertSchemaMatch(coll, {my_keyword: "ignored", minProperties: 2}, {_id: 0, a: 1}, true);
+assertSchemaMatch(
+ coll, {properties: {a: {my_keyword: "ignored", minProperties: 1}}}, {a: {b: 1}}, true);
+
+// Test that the same query knob does not change the behavior for unsupported keywords.
+{
+ let res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {default: {_id: 0}}}});
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({
+ find: coll.getName(),
+ query: {$jsonSchema: {definitions: {numberField: {type: "number"}}}}
+ });
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {format: "email"}}});
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {id: "someschema.json"}}});
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({
+ find: coll.getName(),
+ query: {$jsonSchema: {properties: {a: {$ref: "#/definitions/positiveInt"}}}}
+ });
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({find: coll.getName(), query: {$jsonSchema: {$schema: "hyper-schema"}}});
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+
+ res = coll.runCommand({
+ find: coll.getName(),
+ query: {$jsonSchema: {$schema: "http://json-schema.org/draft-04/schema#"}}
+ });
+ assert.commandFailedWithCode(res, ErrorCodes.FailedToParse);
+}
+
+MongoRunner.stopMongod(conn);
}());
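
The knob above only changes how unknown keywords are treated; keywords that are recognized but deliberately unsupported (format, $ref, $schema, and so on) still fail to parse. A hypothetical before/after, assuming a mongod started with the same setParameter and coll as above:

    // Unknown keyword: ignored with the knob on, so the find parses and runs.
    assert.commandWorked(
        coll.runCommand({find: coll.getName(), query: {$jsonSchema: {my_keyword: "ignored"}}}));
    // Unsupported (but known) keyword: still rejected.
    assert.commandFailedWithCode(
        coll.runCommand({find: coll.getName(), query: {$jsonSchema: {format: "email"}}}),
        ErrorCodes.FailedToParse);
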
diff --git a/jstests/noPassthrough/kill_pinned_cursor.js b/jstests/noPassthrough/kill_pinned_cursor.js
index f7233bc0d66..86786fd3084 100644
--- a/jstests/noPassthrough/kill_pinned_cursor.js
+++ b/jstests/noPassthrough/kill_pinned_cursor.js
@@ -11,107 +11,104 @@
// batches are generated, this requires some special machinery to keep a cursor permanently pinned.
(function() {
- "use strict";
+"use strict";
- // This test runs manual getMores using different connections, which will not inherit the
- // implicit session of the cursor establishing command.
- TestData.disableImplicitSessions = true;
+// This test runs manual getMores using different connections, which will not inherit the
+// implicit session of the cursor establishing command.
+TestData.disableImplicitSessions = true;
- load("jstests/libs/fixture_helpers.js"); // For "isMongos".
- load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
- const st = new ShardingTest({shards: 2});
+load("jstests/libs/fixture_helpers.js"); // For "isMongos".
+load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
+const st = new ShardingTest({shards: 2});
- // Enables the specified 'failPointName', executes 'runGetMoreFunc' function in a parallel
-    // shell, waits for the failpoint to be hit, then kills the cursor and confirms that the
- // kill was successful.
- function runPinnedCursorKillTest({conn, failPointName, runGetMoreFunc}) {
- function assertFunction(cursorId, coll) {
- const db = coll.getDB();
- // Kill the cursor associated with the command and assert that the kill succeeded.
- let cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [cursorId]});
- assert.commandWorked(cmdRes);
- assert.eq(cmdRes.cursorsKilled, [cursorId]);
- assert.eq(cmdRes.cursorsAlive, []);
- assert.eq(cmdRes.cursorsNotFound, []);
- assert.eq(cmdRes.cursorsUnknown, []);
- }
- withPinnedCursor({
- conn: conn,
- sessionId: null,
- db: conn.getDB("test"),
- assertFunction: assertFunction,
- runGetMoreFunc: runGetMoreFunc,
- failPointName: failPointName,
- assertEndCounts: true
- });
+// Enables the specified 'failPointName', executes 'runGetMoreFunc' function in a parallel
+// shell, waits for the failpoint to be hit, then kills the cursor and confirms that the
+// kill was successful.
+function runPinnedCursorKillTest({conn, failPointName, runGetMoreFunc}) {
+ function assertFunction(cursorId, coll) {
+ const db = coll.getDB();
+ // Kill the cursor associated with the command and assert that the kill succeeded.
+ let cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [cursorId]});
+ assert.commandWorked(cmdRes);
+ assert.eq(cmdRes.cursorsKilled, [cursorId]);
+ assert.eq(cmdRes.cursorsAlive, []);
+ assert.eq(cmdRes.cursorsNotFound, []);
+ assert.eq(cmdRes.cursorsUnknown, []);
}
+ withPinnedCursor({
+ conn: conn,
+ sessionId: null,
+ db: conn.getDB("test"),
+ assertFunction: assertFunction,
+ runGetMoreFunc: runGetMoreFunc,
+ failPointName: failPointName,
+ assertEndCounts: true
+ });
+}
- // Test that killing the pinned cursor before it starts building the batch results in a
- // CursorKilled exception on a replica set.
- const rs0Conn = st.rs0.getPrimary();
- const testParameters = {
- conn: rs0Conn,
- failPointName: "waitAfterPinningCursorBeforeGetMoreBatch",
- runGetMoreFunc: function() {
- const response = db.runCommand({getMore: cursorId, collection: collName});
- // We expect that the operation will get interrupted and fail.
- assert.commandFailedWithCode(response, ErrorCodes.CursorKilled);
- }
- };
- runPinnedCursorKillTest(testParameters);
+// Test that killing the pinned cursor before it starts building the batch results in a
+// CursorKilled exception on a replica set.
+const rs0Conn = st.rs0.getPrimary();
+const testParameters = {
+ conn: rs0Conn,
+ failPointName: "waitAfterPinningCursorBeforeGetMoreBatch",
+ runGetMoreFunc: function() {
+ const response = db.runCommand({getMore: cursorId, collection: collName});
+ // We expect that the operation will get interrupted and fail.
+ assert.commandFailedWithCode(response, ErrorCodes.CursorKilled);
+ }
+};
+runPinnedCursorKillTest(testParameters);
- // Check the case where a killCursor is run as we're building a getMore batch on mongod.
- (function() {
- testParameters.conn = rs0Conn;
- testParameters.failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
+// Check the case where a killCursor is run as we're building a getMore batch on mongod.
+(function() {
+testParameters.conn = rs0Conn;
+testParameters.failPointName = "waitWithPinnedCursorDuringGetMoreBatch";
- // Force yield to occur on every PlanExecutor iteration, so that the getMore is guaranteed
- // to check for interrupts.
- assert.commandWorked(testParameters.conn.getDB("admin").runCommand(
- {setParameter: 1, internalQueryExecYieldIterations: 1}));
- runPinnedCursorKillTest(testParameters);
- })();
+// Force yield to occur on every PlanExecutor iteration, so that the getMore is guaranteed
+// to check for interrupts.
+assert.commandWorked(testParameters.conn.getDB("admin").runCommand(
+ {setParameter: 1, internalQueryExecYieldIterations: 1}));
+runPinnedCursorKillTest(testParameters);
+})();
- (function() {
- // Run the equivalent test on the mongos. This time, we will force the shards to hang as
- // well. This is so that we can guarantee that the mongos is checking for interruption at
- // the appropriate time, and not just propagating an error it receives from the mongods.
- testParameters.failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
- FixtureHelpers.runCommandOnEachPrimary({
- db: st.s.getDB("admin"),
- cmdObj: {
- configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch",
- mode: "alwaysOn"
- }
- });
- testParameters.conn = st.s;
- runPinnedCursorKillTest(testParameters);
- FixtureHelpers.runCommandOnEachPrimary({
- db: st.s.getDB("admin"),
- cmdObj: {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}
- });
- })();
+(function() {
+// Run the equivalent test on the mongos. This time, we will force the shards to hang as
+// well. This is so that we can guarantee that the mongos is checking for interruption at
+// the appropriate time, and not just propagating an error it receives from the mongods.
+testParameters.failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+FixtureHelpers.runCommandOnEachPrimary({
+ db: st.s.getDB("admin"),
+ cmdObj: {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "alwaysOn"}
+});
+testParameters.conn = st.s;
+runPinnedCursorKillTest(testParameters);
+FixtureHelpers.runCommandOnEachPrimary({
+ db: st.s.getDB("admin"),
+ cmdObj: {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}
+});
+})();
- // Check this case where the interrupt comes in after the batch has been built, and is about to
- // be returned. This is relevant for both mongod and mongos.
- const connsToRunOn = [st.s, rs0Conn];
- for (let conn of connsToRunOn) {
- jsTestLog("Running on conn: " + tojson(conn));
+// Check this case where the interrupt comes in after the batch has been built, and is about to
+// be returned. This is relevant for both mongod and mongos.
+const connsToRunOn = [st.s, rs0Conn];
+for (let conn of connsToRunOn) {
+ jsTestLog("Running on conn: " + tojson(conn));
- // Test that, if the pinned cursor is killed after it has finished building a batch, that
- // batch is returned to the client but a subsequent getMore will fail with a
- // 'CursorNotFound' error.
- testParameters.failPointName = "waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch";
- testParameters.runGetMoreFunc = function() {
- const getMoreCmd = {getMore: cursorId, collection: collName, batchSize: 2};
- // We expect that the first getMore will succeed, while the second fails because the
- // cursor has been killed.
- assert.commandWorked(db.runCommand(getMoreCmd));
- assert.commandFailedWithCode(db.runCommand(getMoreCmd), ErrorCodes.CursorNotFound);
- };
+ // Test that, if the pinned cursor is killed after it has finished building a batch, that
+ // batch is returned to the client but a subsequent getMore will fail with a
+ // 'CursorNotFound' error.
+ testParameters.failPointName = "waitBeforeUnpinningOrDeletingCursorAfterGetMoreBatch";
+ testParameters.runGetMoreFunc = function() {
+ const getMoreCmd = {getMore: cursorId, collection: collName, batchSize: 2};
+ // We expect that the first getMore will succeed, while the second fails because the
+ // cursor has been killed.
+ assert.commandWorked(db.runCommand(getMoreCmd));
+ assert.commandFailedWithCode(db.runCommand(getMoreCmd), ErrorCodes.CursorNotFound);
+ };
- runPinnedCursorKillTest(testParameters);
- }
+ runPinnedCursorKillTest(testParameters);
+}
- st.stop();
+st.stop();
})();
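
The core check repeated throughout the test above is the shape of a successful killCursors reply against a pinned cursor. In isolation, with collName and cursorId as provided by withPinnedCursor:

    const cmdRes = db.runCommand({killCursors: collName, cursors: [cursorId]});
    assert.commandWorked(cmdRes);
    assert.eq(cmdRes.cursorsKilled, [cursorId]);
    assert.eq(cmdRes.cursorsAlive, []);
    assert.eq(cmdRes.cursorsNotFound, []);
    assert.eq(cmdRes.cursorsUnknown, []);
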
diff --git a/jstests/noPassthrough/kill_sessions.js b/jstests/noPassthrough/kill_sessions.js
index 0211dba95ef..f0e7a05a4ba 100644
--- a/jstests/noPassthrough/kill_sessions.js
+++ b/jstests/noPassthrough/kill_sessions.js
@@ -1,13 +1,13 @@
load("jstests/libs/kill_sessions.js");
(function() {
- 'use strict';
+'use strict';
- // TODO SERVER-35447: This test involves killing all sessions, which will not work as expected
- // if the kill command is sent with an implicit session.
- TestData.disableImplicitSessions = true;
+// TODO SERVER-35447: This test involves killing all sessions, which will not work as expected
+// if the kill command is sent with an implicit session.
+TestData.disableImplicitSessions = true;
- var conn = MongoRunner.runMongod();
- KillSessionsTestHelper.runNoAuth(conn, conn, [conn]);
- MongoRunner.stopMongod(conn);
+var conn = MongoRunner.runMongod();
+KillSessionsTestHelper.runNoAuth(conn, conn, [conn]);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/killop.js b/jstests/noPassthrough/killop.js
index 69305f25747..53f14b1f838 100644
--- a/jstests/noPassthrough/killop.js
+++ b/jstests/noPassthrough/killop.js
@@ -2,72 +2,71 @@
// @tags: [requires_replication, requires_sharding]
(function() {
- "use strict";
-
- const dbName = "killop";
- const collName = "test";
-
- // 'conn' is a connection to either a mongod when testing a replicaset or a mongos when testing
- // a sharded cluster. 'shardConn' is a connection to the mongod we enable failpoints on.
- function runTest(conn, shardConn) {
- const db = conn.getDB(dbName);
- assert.commandWorked(db.dropDatabase());
- assert.writeOK(db.getCollection(collName).insert({x: 1}));
-
- assert.commandWorked(
- shardConn.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
- assert.commandWorked(shardConn.adminCommand(
- {"configureFailPoint": "setYieldAllLocksHang", "mode": "alwaysOn"}));
-
- const queryToKill = "assert.commandWorked(db.getSiblingDB('" + dbName +
- "').runCommand({find: '" + collName + "', filter: {x: 1}}));";
- const awaitShell = startParallelShell(queryToKill, conn.port);
- let opId;
-
- assert.soon(
- function() {
- const result =
- db.currentOp({"ns": dbName + "." + collName, "command.filter": {x: 1}});
- assert.commandWorked(result);
- if (result.inprog.length === 1 && result.inprog[0].numYields > 0) {
- opId = result.inprog[0].opid;
- return true;
- }
-
- return false;
- },
- function() {
- return "Failed to find operation in currentOp() output: " +
- tojson(db.currentOp({"ns": dbName + "." + collName}));
- });
-
- assert.commandWorked(db.killOp(opId));
-
- let result = db.currentOp({"ns": dbName + "." + collName, "command.filter": {x: 1}});
- assert.commandWorked(result);
- assert(result.inprog.length === 1, tojson(db.currentOp()));
- assert(result.inprog[0].hasOwnProperty("killPending"));
- assert.eq(true, result.inprog[0].killPending);
-
- assert.commandWorked(
- shardConn.adminCommand({"configureFailPoint": "setYieldAllLocksHang", "mode": "off"}));
-
- const exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode, "Expected shell to exit with failure due to operation kill");
-
- result = db.currentOp({"ns": dbName + "." + collName, "query.filter": {x: 1}});
- assert.commandWorked(result);
- assert(result.inprog.length === 0, tojson(db.currentOp()));
- }
-
- const st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
- const shardConn = st.rs0.getPrimary();
-
- // Test killOp against mongod.
- runTest(shardConn, shardConn);
-
- // Test killOp against mongos.
- runTest(st.s, shardConn);
-
- st.stop();
+"use strict";
+
+const dbName = "killop";
+const collName = "test";
+
+// 'conn' is a connection to either a mongod when testing a replicaset or a mongos when testing
+// a sharded cluster. 'shardConn' is a connection to the mongod we enable failpoints on.
+function runTest(conn, shardConn) {
+ const db = conn.getDB(dbName);
+ assert.commandWorked(db.dropDatabase());
+ assert.writeOK(db.getCollection(collName).insert({x: 1}));
+
+ assert.commandWorked(
+ shardConn.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
+ assert.commandWorked(
+ shardConn.adminCommand({"configureFailPoint": "setYieldAllLocksHang", "mode": "alwaysOn"}));
+
+ const queryToKill = "assert.commandWorked(db.getSiblingDB('" + dbName +
+ "').runCommand({find: '" + collName + "', filter: {x: 1}}));";
+ const awaitShell = startParallelShell(queryToKill, conn.port);
+ let opId;
+
+ assert.soon(
+ function() {
+ const result = db.currentOp({"ns": dbName + "." + collName, "command.filter": {x: 1}});
+ assert.commandWorked(result);
+ if (result.inprog.length === 1 && result.inprog[0].numYields > 0) {
+ opId = result.inprog[0].opid;
+ return true;
+ }
+
+ return false;
+ },
+ function() {
+ return "Failed to find operation in currentOp() output: " +
+ tojson(db.currentOp({"ns": dbName + "." + collName}));
+ });
+
+ assert.commandWorked(db.killOp(opId));
+
+ let result = db.currentOp({"ns": dbName + "." + collName, "command.filter": {x: 1}});
+ assert.commandWorked(result);
+ assert(result.inprog.length === 1, tojson(db.currentOp()));
+ assert(result.inprog[0].hasOwnProperty("killPending"));
+ assert.eq(true, result.inprog[0].killPending);
+
+ assert.commandWorked(
+ shardConn.adminCommand({"configureFailPoint": "setYieldAllLocksHang", "mode": "off"}));
+
+ const exitCode = awaitShell({checkExitSuccess: false});
+ assert.neq(0, exitCode, "Expected shell to exit with failure due to operation kill");
+
+ result = db.currentOp({"ns": dbName + "." + collName, "query.filter": {x: 1}});
+ assert.commandWorked(result);
+ assert(result.inprog.length === 0, tojson(db.currentOp()));
+}
+
+const st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1});
+const shardConn = st.rs0.getPrimary();
+
+// Test killOp against mongod.
+runTest(shardConn, shardConn);
+
+// Test killOp against mongos.
+runTest(st.s, shardConn);
+
+st.stop();
})();
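
The killOp test above relies on cooperative interruption: internalQueryExecYieldIterations=1 forces the find to yield on every document, and killOp() only marks the operation, which actually dies at its next interrupt check. The lookup-and-kill step, reduced to its essentials (dbName and collName as above):

    const ops = db.currentOp({"ns": dbName + "." + collName, "command.filter": {x: 1}}).inprog;
    assert.eq(1, ops.length);
    assert.commandWorked(db.killOp(ops[0].opid));  // sets killPending; the op exits at its next yield
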
diff --git a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
index e3f10dade92..5b1757188e7 100644
--- a/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
+++ b/jstests/noPassthrough/latency_includes_lock_acquisition_time.js
@@ -4,152 +4,151 @@
* @tags: [requires_profiling]
*/
(function() {
- "use strict";
+"use strict";
- /**
- * Configures the server to wait for 'millis' while acquiring locks in the CRUD path, then
- * invokes the no-arguments function 'func', then disables the aforementioned lock wait
- * behavior.
- */
- function runWithWait(millis, func) {
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "setAutoGetCollectionWait",
- mode: "alwaysOn",
- data: {waitForMillis: millis}
- }));
- func();
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "setAutoGetCollectionWait",
- mode: "off",
- }));
- }
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/profiler.js");
+/**
+ * Configures the server to wait for 'millis' while acquiring locks in the CRUD path, then
+ * invokes the no-arguments function 'func', then disables the aforementioned lock wait
+ * behavior.
+ */
+function runWithWait(millis, func) {
+ assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "setAutoGetCollectionWait",
+ mode: "alwaysOn",
+ data: {waitForMillis: millis}
+ }));
+ func();
+ assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "setAutoGetCollectionWait",
+ mode: "off",
+ }));
+}
- let hangMillis = 200;
- let padding = hangMillis / 10;
+load("jstests/libs/check_log.js");
+load("jstests/libs/profiler.js");
- let conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
- let testDB = conn.getDB("test");
- let testColl = testDB.lock_acquisition_time;
+let hangMillis = 200;
+let padding = hangMillis / 10;
- function runTests() {
- // Profile all operations.
- assert.commandWorked(testDB.setProfilingLevel(0));
- testDB.system.profile.drop();
- assert.commandWorked(testDB.setProfilingLevel(2));
+let conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+let testDB = conn.getDB("test");
+let testColl = testDB.lock_acquisition_time;
- // Test that insert profiler/logs include lock acquisition time. Rather than parsing the log
- // lines, we are just verifying that the log line appears, which implies that the recorded
- // latency exceeds slowms.
- runWithWait(hangMillis, function() {
- assert.writeOK(testColl.insert({a: 1}));
- });
- let profileEntry;
- if (conn.writeMode() === "commands") {
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.insert": testColl.getName(),
- });
- } else {
- profileEntry = getLatestProfilerEntry(testDB, {
- op: "insert",
- ns: testColl.getFullName(),
- });
- }
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn,
- conn.writeMode() === "commands"
- ? "insert { insert: \"lock_acquisition_time\""
- : "insert test.lock_acquisition_time");
+function runTests() {
+ // Profile all operations.
+ assert.commandWorked(testDB.setProfilingLevel(0));
+ testDB.system.profile.drop();
+ assert.commandWorked(testDB.setProfilingLevel(2));
- // Test that update profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.writeOK(testColl.update({}, {$set: {b: 1}}));
- });
+ // Test that insert profiler/logs include lock acquisition time. Rather than parsing the log
+ // lines, we are just verifying that the log line appears, which implies that the recorded
+ // latency exceeds slowms.
+ runWithWait(hangMillis, function() {
+ assert.writeOK(testColl.insert({a: 1}));
+ });
+ let profileEntry;
+ if (conn.writeMode() === "commands") {
profileEntry = getLatestProfilerEntry(testDB, {
ns: testColl.getFullName(),
- "command.u": {$eq: {$set: {b: 1}}},
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "update { update: \"lock_acquisition_time\"");
-
- // Test that find profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq(1, testColl.find({b: 1}).itcount());
+ "command.insert": testColl.getName(),
});
+ } else {
profileEntry = getLatestProfilerEntry(testDB, {
+ op: "insert",
ns: testColl.getFullName(),
- "command.find": testColl.getName(),
});
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "find { find: \"lock_acquisition_time\"");
+ }
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn,
+ conn.writeMode() === "commands" ? "insert { insert: \"lock_acquisition_time\""
+ : "insert test.lock_acquisition_time");
- // Test that getMore profiler/logs include lock acquisition time.
- assert.writeOK(testColl.insert([{a: 2}, {a: 3}]));
- runWithWait(hangMillis, function() {
- // Include a batchSize in order to ensure that a getMore is issued.
- assert.eq(3, testColl.find().batchSize(2).itcount());
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.getMore": {$exists: true},
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "originatingCommand: { find: \"lock_acquisition_time\"");
- assert.writeOK(testColl.remove({a: {$gt: 1}}));
+ // Test that update profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.writeOK(testColl.update({}, {$set: {b: 1}}));
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.u": {$eq: {$set: {b: 1}}},
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "update { update: \"lock_acquisition_time\"");
- // Test that aggregate profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq(1, testColl.aggregate([{$match: {b: 1}}]).itcount());
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.aggregate": testColl.getName(),
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "aggregate { aggregate: \"lock_acquisition_time\"");
+ // Test that find profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.eq(1, testColl.find({b: 1}).itcount());
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.find": testColl.getName(),
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "find { find: \"lock_acquisition_time\"");
- // Test that count profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq(1, testColl.count());
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.count": testColl.getName(),
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "count { count: \"lock_acquisition_time\"");
+ // Test that getMore profiler/logs include lock acquisition time.
+ assert.writeOK(testColl.insert([{a: 2}, {a: 3}]));
+ runWithWait(hangMillis, function() {
+ // Include a batchSize in order to ensure that a getMore is issued.
+ assert.eq(3, testColl.find().batchSize(2).itcount());
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.getMore": {$exists: true},
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "originatingCommand: { find: \"lock_acquisition_time\"");
+ assert.writeOK(testColl.remove({a: {$gt: 1}}));
- // Test that distinct profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.eq([1], testColl.distinct("a"));
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.distinct": testColl.getName(),
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "distinct { distinct: \"lock_acquisition_time\"");
+ // Test that aggregate profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.eq(1, testColl.aggregate([{$match: {b: 1}}]).itcount());
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.aggregate": testColl.getName(),
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "aggregate { aggregate: \"lock_acquisition_time\"");
- // Test that delete profiler/logs include lock acquisition time.
- runWithWait(hangMillis, function() {
- assert.writeOK(testColl.remove({b: 1}));
- });
- profileEntry = getLatestProfilerEntry(testDB, {
- ns: testColl.getFullName(),
- "command.q": {b: 1},
- });
- assert.gte(profileEntry.millis, hangMillis - padding);
- checkLog.contains(conn, "delete { delete: \"lock_acquisition_time\"");
- }
+ // Test that count profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.eq(1, testColl.count());
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.count": testColl.getName(),
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "count { count: \"lock_acquisition_time\"");
+
+ // Test that distinct profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.eq([1], testColl.distinct("a"));
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.distinct": testColl.getName(),
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "distinct { distinct: \"lock_acquisition_time\"");
+
+ // Test that delete profiler/logs include lock acquisition time.
+ runWithWait(hangMillis, function() {
+ assert.writeOK(testColl.remove({b: 1}));
+ });
+ profileEntry = getLatestProfilerEntry(testDB, {
+ ns: testColl.getFullName(),
+ "command.q": {b: 1},
+ });
+ assert.gte(profileEntry.millis, hangMillis - padding);
+ checkLog.contains(conn, "delete { delete: \"lock_acquisition_time\"");
+}
- // Run the tests once with read and write commands and once with legacy ops.
- runTests();
- conn.forceWriteMode("compatibility");
- conn.forceReadMode("legacy");
- runTests();
- MongoRunner.stopMongod(conn);
+// Run the tests once with read and write commands and once with legacy ops.
+runTests();
+conn.forceWriteMode("compatibility");
+conn.forceReadMode("legacy");
+runTests();
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/launcher_test.js b/jstests/noPassthrough/launcher_test.js
index a4d00ae19f7..a9fc9384c4b 100644
--- a/jstests/noPassthrough/launcher_test.js
+++ b/jstests/noPassthrough/launcher_test.js
@@ -1,32 +1,31 @@
// Note: This test cannot be run in parallel because all output from child processes of the same
// shell is multiplexed to the same buffer.
(function() {
- "use strict";
+"use strict";
- // Note: the windows command line length limit is 8191 characters, so keep this string length
- // under that.
- const numLines = 300;
- const lineContents = "lots of super fun text\n".repeat(numLines).trim();
+// Note: the windows command line length limit is 8191 characters, so keep this string length
+// under that.
+const numLines = 300;
+const lineContents = "lots of super fun text\n".repeat(numLines).trim();
- var echoTest = function() {
- clearRawMongoProgramOutput();
+var echoTest = function() {
+ clearRawMongoProgramOutput();
- // This will produce `numLines` + 1 lines of output because echo isn't being called with
- // `-n`. This will block until the program exits.
- var exitCode = runProgram("echo", lineContents);
- var output = rawMongoProgramOutput();
+ // This will produce `numLines` + 1 lines of output because echo isn't being called with
+ // `-n`. This will block until the program exits.
+ var exitCode = runProgram("echo", lineContents);
+ var output = rawMongoProgramOutput();
- assert.eq(0, exitCode);
+ assert.eq(0, exitCode);
- assert.eq(numLines,
- output.split('\n').length - 1,
- "didn't wait for program's output buffer to finish being consumed");
- };
-
- // The motivating failure for the test was a race in runProgram. Empirically, 10 runs has always
- // been sufficient for this to fail. 16 gives the test some leeway.
- for (var i = 0; i < 16; i++) {
- echoTest();
- }
+ assert.eq(numLines,
+ output.split('\n').length - 1,
+ "didn't wait for program's output buffer to finish being consumed");
+};
+// The motivating failure for the test was a race in runProgram. Empirically, 10 runs have always
+// been sufficient for this to fail. 16 gives the test some leeway.
+for (var i = 0; i < 16; i++) {
+ echoTest();
+}
})();
diff --git a/jstests/noPassthrough/libs/backup_restore.js b/jstests/noPassthrough/libs/backup_restore.js
index b18fb9e25d6..37411b9d061 100644
--- a/jstests/noPassthrough/libs/backup_restore.js
+++ b/jstests/noPassthrough/libs/backup_restore.js
@@ -152,8 +152,8 @@ var BackupRestoreTest = function(options) {
assert(options.backup, "Backup option not supplied");
assert.contains(options.backup,
allowedBackupKeys,
- 'invalid option: ' + tojson(options.backup) + '; valid options are: ' +
- tojson(allowedBackupKeys));
+ 'invalid option: ' + tojson(options.backup) +
+ '; valid options are: ' + tojson(allowedBackupKeys));
// Number of nodes in initial replica set (default 3)
var numNodes = options.nodes || 3;
diff --git a/jstests/noPassthrough/libs/configExpand/lib.js b/jstests/noPassthrough/libs/configExpand/lib.js
index c1ba975565b..c3125d99a2f 100644
--- a/jstests/noPassthrough/libs/configExpand/lib.js
+++ b/jstests/noPassthrough/libs/configExpand/lib.js
@@ -4,8 +4,8 @@
class ConfigExpandRestServer {
/**
- * Create a new webserver.
- */
+ * Create a new webserver.
+ */
constructor() {
load('jstests/libs/python.js');
this.python = getPython3Binary();
diff --git a/jstests/noPassthrough/libs/index_build.js b/jstests/noPassthrough/libs/index_build.js
index 1d3171e13f5..8de49ceb06e 100644
--- a/jstests/noPassthrough/libs/index_build.js
+++ b/jstests/noPassthrough/libs/index_build.js
@@ -71,8 +71,8 @@ class IndexBuildTest {
const inprog = database.currentOp({opid: opId}).inprog;
assert.eq(1,
inprog.length,
- 'unable to find opid ' + opId + ' in currentOp() result: ' +
- tojson(database.currentOp()));
+ 'unable to find opid ' + opId +
+ ' in currentOp() result: ' + tojson(database.currentOp()));
const op = inprog[0];
assert.eq(opId, op.opid, 'db.currentOp() returned wrong index build info: ' + tojson(op));
if (onOperationFn) {
@@ -98,16 +98,14 @@ class IndexBuildTest {
assert.eq(0, res.cursor.id);
// A map of index specs keyed by index name.
- const indexMap = res.cursor.firstBatch.reduce(
- (m, spec) => {
- if (spec.hasOwnProperty('buildUUID')) {
- m[spec.spec.name] = spec;
- } else {
- m[spec.name] = spec;
- }
- return m;
- },
- {});
+ const indexMap = res.cursor.firstBatch.reduce((m, spec) => {
+ if (spec.hasOwnProperty('buildUUID')) {
+ m[spec.spec.name] = spec;
+ } else {
+ m[spec.name] = spec;
+ }
+ return m;
+ }, {});
// Check ready indexes.
for (let name of readyIndexes) {
diff --git a/jstests/noPassthrough/list_databases_and_rename_collection.js b/jstests/noPassthrough/list_databases_and_rename_collection.js
index 9faebcb7dc8..d5504d2582d 100644
--- a/jstests/noPassthrough/list_databases_and_rename_collection.js
+++ b/jstests/noPassthrough/list_databases_and_rename_collection.js
@@ -3,57 +3,57 @@
//
(function() {
- "use strict";
- const dbName = "do_concurrent_rename";
- const collName = "collA";
- const otherName = "collB";
- const repeatListDatabases = 20;
- const listDatabasesCmd = {"listDatabases": 1};
- load("jstests/noPassthrough/libs/concurrent_rename.js");
- load("jstests/libs/parallel_shell_helpers.js");
-
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
- jsTestLog("Create collection.");
- let listRenameDB = conn.getDB(dbName);
- listRenameDB.dropDatabase();
- assert.commandWorked(listRenameDB.runCommand({"create": collName}));
-
- let testDB = conn.getDB("test");
- testDB.dropDatabase();
-
- jsTestLog("Verify database exists.");
- let cmdRes = listRenameDB.adminCommand(listDatabasesCmd);
- assert.commandWorked(cmdRes, "expected " + tojson(listDatabasesCmd) + " to be successful.");
- assert(cmdRes.hasOwnProperty("databases"),
- "expected " + tojson(cmdRes) + " to have a databases property.");
- assert(cmdRes.databases.map(d => d.name).includes(dbName),
- "expected " + tojson(cmdRes) + " to include " + dbName);
-
- jsTestLog("Start parallel shell");
- let renameShell =
- startParallelShell(funWithArgs(doRenames, dbName, collName, otherName), conn.port);
-
- // Wait until we receive confirmation that the parallel shell has started.
- assert.soon(() => conn.getDB("test").await_data.findOne({_id: "signal parent shell"}) !== null);
-
- jsTestLog("Start listDatabases.");
- while (conn.getDB("test").await_data.findOne({_id: "rename has ended"}) == null) {
- for (let i = 0; i < repeatListDatabases; i++) {
- cmdRes = listRenameDB.adminCommand(listDatabasesCmd);
- assert.commandWorked(cmdRes,
- "expected " + tojson(listDatabasesCmd) + " to be successful.");
- // Database should always exist.
- assert(cmdRes.hasOwnProperty("databases"),
- "expected " + tojson(cmdRes) + " to have a databases property.");
- assert(cmdRes.databases.map(d => d.name).includes(dbName),
- "expected " + tojson(cmdRes) + " to include " + dbName);
- }
+"use strict";
+const dbName = "do_concurrent_rename";
+const collName = "collA";
+const otherName = "collB";
+const repeatListDatabases = 20;
+const listDatabasesCmd = {
+ "listDatabases": 1
+};
+load("jstests/noPassthrough/libs/concurrent_rename.js");
+load("jstests/libs/parallel_shell_helpers.js");
+
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
+jsTestLog("Create collection.");
+let listRenameDB = conn.getDB(dbName);
+listRenameDB.dropDatabase();
+assert.commandWorked(listRenameDB.runCommand({"create": collName}));
+
+let testDB = conn.getDB("test");
+testDB.dropDatabase();
+
+jsTestLog("Verify database exists.");
+let cmdRes = listRenameDB.adminCommand(listDatabasesCmd);
+assert.commandWorked(cmdRes, "expected " + tojson(listDatabasesCmd) + " to be successful.");
+assert(cmdRes.hasOwnProperty("databases"),
+ "expected " + tojson(cmdRes) + " to have a databases property.");
+assert(cmdRes.databases.map(d => d.name).includes(dbName),
+ "expected " + tojson(cmdRes) + " to include " + dbName);
+
+jsTestLog("Start parallel shell");
+let renameShell =
+ startParallelShell(funWithArgs(doRenames, dbName, collName, otherName), conn.port);
+
+// Wait until we receive confirmation that the parallel shell has started.
+assert.soon(() => conn.getDB("test").await_data.findOne({_id: "signal parent shell"}) !== null);
+
+jsTestLog("Start listDatabases.");
+while (conn.getDB("test").await_data.findOne({_id: "rename has ended"}) == null) {
+ for (let i = 0; i < repeatListDatabases; i++) {
+ cmdRes = listRenameDB.adminCommand(listDatabasesCmd);
+ assert.commandWorked(cmdRes, "expected " + tojson(listDatabasesCmd) + " to be successful.");
+ // Database should always exist.
+ assert(cmdRes.hasOwnProperty("databases"),
+ "expected " + tojson(cmdRes) + " to have a databases property.");
+ assert(cmdRes.databases.map(d => d.name).includes(dbName),
+ "expected " + tojson(cmdRes) + " to include " + dbName);
}
+}
- jsTestLog("Finished running listDatabases.");
-
- renameShell();
- MongoRunner.stopMongod(conn);
+jsTestLog("Finished running listDatabases.");
+renameShell();
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/list_indexes_ready_and_in_progress.js b/jstests/noPassthrough/list_indexes_ready_and_in_progress.js
index 0ec11308b8a..a1970beea83 100644
--- a/jstests/noPassthrough/list_indexes_ready_and_in_progress.js
+++ b/jstests/noPassthrough/list_indexes_ready_and_in_progress.js
@@ -2,39 +2,39 @@
* Tests that the listIndexes command shows ready and in-progress indexes.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/noPassthrough/libs/index_build.js");
+load("jstests/noPassthrough/libs/index_build.js");
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("test");
- assert.commandWorked(testDB.dropDatabase());
+const testDB = conn.getDB("test");
+assert.commandWorked(testDB.dropDatabase());
- let coll = testDB.list_indexes_ready_and_in_progress;
- coll.drop();
- assert.commandWorked(testDB.createCollection(coll.getName()));
- IndexBuildTest.assertIndexes(coll, 1, ["_id_"]);
- assert.commandWorked(coll.createIndex({a: 1}));
- IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
+let coll = testDB.list_indexes_ready_and_in_progress;
+coll.drop();
+assert.commandWorked(testDB.createCollection(coll.getName()));
+IndexBuildTest.assertIndexes(coll, 1, ["_id_"]);
+assert.commandWorked(coll.createIndex({a: 1}));
+IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
- IndexBuildTest.pauseIndexBuilds(conn);
- const createIdx =
- IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {b: 1}, {background: true});
- IndexBuildTest.waitForIndexBuildToStart(testDB);
+IndexBuildTest.pauseIndexBuilds(conn);
+const createIdx =
+ IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {b: 1}, {background: true});
+IndexBuildTest.waitForIndexBuildToStart(testDB);
- // The listIndexes command supports returning all indexes, including ones that are not ready.
- IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1"], ["b_1"], {includeBuildUUIDs: true});
+// The listIndexes command supports returning all indexes, including ones that are not ready.
+IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1"], ["b_1"], {includeBuildUUIDs: true});
- IndexBuildTest.resumeIndexBuilds(conn);
+IndexBuildTest.resumeIndexBuilds(conn);
- // Wait for the index build to stop.
- IndexBuildTest.waitForIndexBuildToStop(testDB);
+// Wait for the index build to stop.
+IndexBuildTest.waitForIndexBuildToStop(testDB);
- const exitCode = createIdx();
- assert.eq(0, exitCode, 'expected shell to exit cleanly');
+const exitCode = createIdx();
+assert.eq(0, exitCode, 'expected shell to exit cleanly');
- IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1", "b_1"]);
- MongoRunner.stopMongod(conn);
+IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1", "b_1"]);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/list_indexes_with_build_uuids.js b/jstests/noPassthrough/list_indexes_with_build_uuids.js
index 76bea4b5a36..a52b58578a5 100644
--- a/jstests/noPassthrough/list_indexes_with_build_uuids.js
+++ b/jstests/noPassthrough/list_indexes_with_build_uuids.js
@@ -4,80 +4,78 @@
* @tags: [requires_replication]
*/
(function() {
- 'use strict';
-
- const dbName = "test";
- const collName = "coll";
-
- const firstIndexName = "first";
- const secondIndexName = "second";
-
- function addTestDocuments(db) {
- let size = 100;
- jsTest.log("Creating " + size + " test documents.");
- var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i, j: i * i});
- }
- assert.writeOK(bulk.execute());
+'use strict';
+
+const dbName = "test";
+const collName = "coll";
+
+const firstIndexName = "first";
+const secondIndexName = "second";
+
+function addTestDocuments(db) {
+ let size = 100;
+ jsTest.log("Creating " + size + " test documents.");
+ var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i, j: i * i});
}
+ assert.writeOK(bulk.execute());
+}
- let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2});
- let nodes = replSet.nodeList();
+let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2});
+let nodes = replSet.nodeList();
- replSet.startSet({startClean: true});
- replSet.initiate({
- _id: "indexBuilds",
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1], votes: 0, priority: 0},
- ]
- });
+replSet.startSet({startClean: true});
+replSet.initiate({
+ _id: "indexBuilds",
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1], votes: 0, priority: 0},
+ ]
+});
- let primary = replSet.getPrimary();
- let primaryDB = primary.getDB(dbName);
+let primary = replSet.getPrimary();
+let primaryDB = primary.getDB(dbName);
- let secondary = replSet.getSecondary();
- let secondaryDB = secondary.getDB(dbName);
+let secondary = replSet.getSecondary();
+let secondaryDB = secondary.getDB(dbName);
- addTestDocuments(primaryDB);
- replSet.awaitReplication();
+addTestDocuments(primaryDB);
+replSet.awaitReplication();
- // Build and finish the first index.
- assert.commandWorked(primaryDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {i: 1}, name: firstIndexName, background: true}]
- }));
- replSet.waitForAllIndexBuildsToFinish(dbName, collName);
+// Build and finish the first index.
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {i: 1}, name: firstIndexName, background: true}]}));
+replSet.waitForAllIndexBuildsToFinish(dbName, collName);
- // Start hanging index builds on the secondary.
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
+// Start hanging index builds on the secondary.
+assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
- // Build and hang on the second index.
- assert.commandWorked(primaryDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {j: 1}, name: secondIndexName, background: true}],
- writeConcern: {w: 2}
- }));
+// Build and hang on the second index.
+assert.commandWorked(primaryDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {j: 1}, name: secondIndexName, background: true}],
+ writeConcern: {w: 2}
+}));
- // Check the listIndexes() output.
- let res = secondaryDB.runCommand({listIndexes: collName, includeBuildUUIDs: true});
+// Check the listIndexes() output.
+let res = secondaryDB.runCommand({listIndexes: collName, includeBuildUUIDs: true});
- assert.commandWorked(res);
- let indexes = res.cursor.firstBatch;
- assert.eq(3, indexes.length);
+assert.commandWorked(res);
+let indexes = res.cursor.firstBatch;
+assert.eq(3, indexes.length);
- jsTest.log(indexes);
+jsTest.log(indexes);
- assert.eq(indexes[0].name, "_id_");
- assert.eq(indexes[1].name, "first");
- assert.eq(indexes[2].spec.name, "second");
- assert(indexes[2].hasOwnProperty("buildUUID"));
+assert.eq(indexes[0].name, "_id_");
+assert.eq(indexes[1].name, "first");
+assert.eq(indexes[2].spec.name, "second");
+assert(indexes[2].hasOwnProperty("buildUUID"));
- // Allow the secondary to finish the index build.
- assert.commandWorked(
- secondaryDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
+// Allow the secondary to finish the index build.
+assert.commandWorked(
+ secondaryDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
- replSet.stopSet();
+replSet.stopSet();
}());
diff --git a/jstests/noPassthrough/lock_file.js b/jstests/noPassthrough/lock_file.js
index 5ff9a897bf9..63311e22bfb 100644
--- a/jstests/noPassthrough/lock_file.js
+++ b/jstests/noPassthrough/lock_file.js
@@ -2,30 +2,28 @@
// containing the process ID regardless of the storage engine requested.
(function() {
- // Ensures that mongod.lock exists and returns size of file.
- function getMongodLockFileSize(dir) {
- var files = listFiles(dir);
- for (var i in files) {
- var file = files[i];
- if (!file.isDirectory && file.baseName == 'mongod.lock') {
- return file.size;
- }
+// Ensures that mongod.lock exists and returns size of file.
+function getMongodLockFileSize(dir) {
+ var files = listFiles(dir);
+ for (var i in files) {
+ var file = files[i];
+ if (!file.isDirectory && file.baseName == 'mongod.lock') {
+ return file.size;
}
- assert(false, 'mongod.lock not found in data directory ' + dir);
}
+ assert(false, 'mongod.lock not found in data directory ' + dir);
+}
- var baseName = "jstests_lock_file";
- var dbpath = MongoRunner.dataPath + baseName + '/';
+var baseName = "jstests_lock_file";
+var dbpath = MongoRunner.dataPath + baseName + '/';
- // Test framework will append --storageEngine command line option.
- var mongod = MongoRunner.runMongod({dbpath: dbpath});
- assert.neq(0,
- getMongodLockFileSize(dbpath),
- 'mongod.lock should not be empty while server is running');
+// Test framework will append --storageEngine command line option.
+var mongod = MongoRunner.runMongod({dbpath: dbpath});
+assert.neq(
+ 0, getMongodLockFileSize(dbpath), 'mongod.lock should not be empty while server is running');
- MongoRunner.stopMongod(mongod);
+MongoRunner.stopMongod(mongod);
- // mongod.lock must be empty after shutting server down.
- assert.eq(
- 0, getMongodLockFileSize(dbpath), 'mongod.lock not truncated after shutting server down');
+// mongod.lock must be empty after shutting server down.
+assert.eq(0, getMongodLockFileSize(dbpath), 'mongod.lock not truncated after shutting server down');
}());
diff --git a/jstests/noPassthrough/lock_file_fail_to_open.js b/jstests/noPassthrough/lock_file_fail_to_open.js
index 59d5fadbb5f..a53c6688b9f 100644
--- a/jstests/noPassthrough/lock_file_fail_to_open.js
+++ b/jstests/noPassthrough/lock_file_fail_to_open.js
@@ -1,27 +1,27 @@
// Tests that MongoD fails to start with the correct error message if mongod.lock exists in the
// dbpath.
(function() {
- "use strict";
+"use strict";
- var baseName = "jstests_lock_file_fail_to_open";
+var baseName = "jstests_lock_file_fail_to_open";
- var dbPath = MongoRunner.dataPath + baseName + "/";
+var dbPath = MongoRunner.dataPath + baseName + "/";
- // Start a MongoD just to get a lockfile in place.
- var mongo1 = MongoRunner.runMongod({dbpath: dbPath, waitForConnect: true});
+// Start a MongoD just to get a lockfile in place.
+var mongo1 = MongoRunner.runMongod({dbpath: dbPath, waitForConnect: true});
- clearRawMongoProgramOutput();
- // Start another one which should fail to start as there is already a lockfile in its
- // dbpath.
- var mongo2 = null;
- mongo2 = MongoRunner.runMongod({dbpath: dbPath, noCleanData: true});
- // We should have failed to start.
- assert(mongo2 === null);
+clearRawMongoProgramOutput();
+// Start another one which should fail to start as there is already a lockfile in its
+// dbpath.
+var mongo2 = null;
+mongo2 = MongoRunner.runMongod({dbpath: dbPath, noCleanData: true});
+// We should have failed to start.
+assert(mongo2 === null);
- var logContents = rawMongoProgramOutput();
- assert(logContents.indexOf("Unable to lock the lock file") > 0 ||
- // Windows error message is different.
- logContents.indexOf("Unable to create/open the lock file") > 0);
+var logContents = rawMongoProgramOutput();
+assert(logContents.indexOf("Unable to lock the lock file") > 0 ||
+ // Windows error message is different.
+ logContents.indexOf("Unable to create/open the lock file") > 0);
- MongoRunner.stopMongod(mongo1);
+MongoRunner.stopMongod(mongo1);
})();
diff --git a/jstests/noPassthrough/lock_stats.js b/jstests/noPassthrough/lock_stats.js
index 85e6350ab0c..1274dd326c4 100644
--- a/jstests/noPassthrough/lock_stats.js
+++ b/jstests/noPassthrough/lock_stats.js
@@ -3,66 +3,66 @@
// This test uses the fsync command to induce locking.
// @tags: [requires_fsync]
(function() {
- 'use strict';
+'use strict';
- function testBlockTime(blockTimeMillis) {
- // Lock the database, and in parallel start an operation that needs the lock, so it blocks.
- assert.commandWorked(db.fsyncLock());
- var startStats = db.serverStatus().locks.Global;
- var startTime = new Date();
- var minBlockedMillis = blockTimeMillis;
- // This is just some command that requires a MODE_X global lock that conflicts.
- var s = startParallelShell(
- 'assert.commandWorked(db.getSiblingDB(\'nonexisting\').dropDatabase());', conn.port);
+function testBlockTime(blockTimeMillis) {
+ // Lock the database, and in parallel start an operation that needs the lock, so it blocks.
+ assert.commandWorked(db.fsyncLock());
+ var startStats = db.serverStatus().locks.Global;
+ var startTime = new Date();
+ var minBlockedMillis = blockTimeMillis;
+ // This is just some command that requires a MODE_X global lock that conflicts.
+ var s = startParallelShell(
+ 'assert.commandWorked(db.getSiblingDB(\'nonexisting\').dropDatabase());', conn.port);
- // Wait until we see somebody waiting to acquire the lock, defend against unset stats.
- assert.soon((function() {
- var stats = db.serverStatus().locks.Global;
- if (!stats.acquireWaitCount || !stats.acquireWaitCount.W)
- return false;
- if (!stats.timeAcquiringMicros || !stats.timeAcquiringMicros.W)
- return false;
- if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W)
- return true;
- return stats.acquireWaitCount.W > startStats.acquireWaitCount.W;
- }));
+ // Wait until we see somebody waiting to acquire the lock, defend against unset stats.
+ assert.soon((function() {
+ var stats = db.serverStatus().locks.Global;
+ if (!stats.acquireWaitCount || !stats.acquireWaitCount.W)
+ return false;
+ if (!stats.timeAcquiringMicros || !stats.timeAcquiringMicros.W)
+ return false;
+ if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W)
+ return true;
+ return stats.acquireWaitCount.W > startStats.acquireWaitCount.W;
+ }));
- // Sleep for minBlockedMillis, so the acquirer would have to wait at least that long.
- sleep(minBlockedMillis);
- db.fsyncUnlock();
+ // Sleep for minBlockedMillis, so the acquirer would have to wait at least that long.
+ sleep(minBlockedMillis);
+ db.fsyncUnlock();
- // Wait for the parallel shell to finish, so its stats will have been recorded.
- s();
+ // Wait for the parallel shell to finish, so its stats will have been recorded.
+ s();
- // The fsync command from the shell cannot have possibly been blocked longer than this.
- var maxBlockedMillis = new Date() - startTime;
- var endStats = db.serverStatus().locks.Global;
+ // The fsync command from the shell cannot have possibly been blocked longer than this.
+ var maxBlockedMillis = new Date() - startTime;
+ var endStats = db.serverStatus().locks.Global;
- // The server was just started, so initial stats may be missing.
- if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W) {
- startStats.acquireWaitCount = {W: 0};
- }
- if (!startStats.timeAcquiringMicros || !startStats.timeAcquiringMicros.W) {
- startStats.timeAcquiringMicros = {W: 0};
- }
+ // The server was just started, so initial stats may be missing.
+ if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W) {
+ startStats.acquireWaitCount = {W: 0};
+ }
+ if (!startStats.timeAcquiringMicros || !startStats.timeAcquiringMicros.W) {
+ startStats.timeAcquiringMicros = {W: 0};
+ }
- var acquireWaitCount = endStats.acquireWaitCount.W - startStats.acquireWaitCount.W;
- var blockedMillis =
- Math.floor((endStats.timeAcquiringMicros.W - startStats.timeAcquiringMicros.W) / 1000);
+ var acquireWaitCount = endStats.acquireWaitCount.W - startStats.acquireWaitCount.W;
+ var blockedMillis =
+ Math.floor((endStats.timeAcquiringMicros.W - startStats.timeAcquiringMicros.W) / 1000);
- // Require that no other commands run (and maybe acquire locks) in parallel.
- assert.eq(acquireWaitCount, 1, "other commands ran in parallel, can't check timing");
- assert.gte(blockedMillis, minBlockedMillis, "reported time acquiring lock is too low");
- assert.lte(blockedMillis, maxBlockedMillis, "reported time acquiring lock is too high");
- return ({
- blockedMillis: blockedMillis,
- minBlockedMillis: minBlockedMillis,
- maxBlockedMillis: maxBlockedMillis
- });
- }
+ // Require that no other commands run (and maybe acquire locks) in parallel.
+ assert.eq(acquireWaitCount, 1, "other commands ran in parallel, can't check timing");
+ assert.gte(blockedMillis, minBlockedMillis, "reported time acquiring lock is too low");
+ assert.lte(blockedMillis, maxBlockedMillis, "reported time acquiring lock is too high");
+ return ({
+ blockedMillis: blockedMillis,
+ minBlockedMillis: minBlockedMillis,
+ maxBlockedMillis: maxBlockedMillis
+ });
+}
- var conn = MongoRunner.runMongod();
- var db = conn.getDB('test');
- printjson([1, 10, 100, 500, 1000, 1500].map(testBlockTime));
- MongoRunner.stopMongod(conn);
+var conn = MongoRunner.runMongod();
+var db = conn.getDB('test');
+printjson([1, 10, 100, 500, 1000, 1500].map(testBlockTime));
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/lock_stats_suboperation_curop.js b/jstests/noPassthrough/lock_stats_suboperation_curop.js
index 5d1b804d917..84e67ae6699 100644
--- a/jstests/noPassthrough/lock_stats_suboperation_curop.js
+++ b/jstests/noPassthrough/lock_stats_suboperation_curop.js
@@ -19,65 +19,65 @@
* @tags: [requires_fsync, requires_document_locking]
*/
(function() {
- 'use strict';
+'use strict';
- const conn = MongoRunner.runMongod();
- const db = conn.getDB('test');
- const coll = db.books;
- const blockedMillis = 2000;
- assert.commandWorked(coll.insert({title: 'Adventures of Huckleberry'}));
- assert.commandWorked(coll.insert({title: '1984'}));
- assert.commandWorked(coll.insert({title: 'Animal Farm'}));
- // Create the output collection beforehand so that $out will execute a code path which triggers
- // the index creation sub-operation.
- db['favorite'].createIndex({foo: 1});
+const conn = MongoRunner.runMongod();
+const db = conn.getDB('test');
+const coll = db.books;
+const blockedMillis = 2000;
+assert.commandWorked(coll.insert({title: 'Adventures of Huckleberry'}));
+assert.commandWorked(coll.insert({title: '1984'}));
+assert.commandWorked(coll.insert({title: 'Animal Farm'}));
+// Create the output collection beforehand so that $out will execute a code path which triggers
+// the index creation sub-operation.
+db['favorite'].createIndex({foo: 1});
- db.setProfilingLevel(0, -1);
+db.setProfilingLevel(0, -1);
- // Lock the database, and then start an operation that needs the lock, so it blocks.
- assert.commandWorked(db.fsyncLock());
+// Lock the database, and then start an operation that needs the lock, so it blocks.
+assert.commandWorked(db.fsyncLock());
- // Turn 'hangAfterStartingIndexBuildUnlocked' failpoint on, which blocks any index builds.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
+// Turn the 'hangAfterStartingIndexBuild' failpoint on, which blocks any index builds.
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
- // Aggregation with $out which will block on creating the temporary collection due to the
- // FsyncLock.
- const dollarOutAggregationShell = startParallelShell(function() {
- // Simple aggregation which copies a document to the output collection.
- assert.commandWorked(db.runCommand({
- aggregate: 'books',
- pipeline: [{$match: {title: '1984'}}, {$out: 'favorite'}],
- cursor: {}
- }));
- }, conn.port);
+// Aggregation with $out which will block on creating the temporary collection due to the
+// FsyncLock.
+const dollarOutAggregationShell = startParallelShell(function() {
+ // Simple aggregation which copies a document to the output collection.
+ assert.commandWorked(db.runCommand({
+ aggregate: 'books',
+ pipeline: [{$match: {title: '1984'}}, {$out: 'favorite'}],
+ cursor: {}
+ }));
+}, conn.port);
- // Wait for sub-operation createCollection to get blocked.
- assert.soon(function() {
- let res = db.currentOp({"command.create": {$exists: true}, waitingForLock: true});
- return res.inprog.length == 1;
- });
+// Wait for sub-operation createCollection to get blocked.
+assert.soon(function() {
+ let res = db.currentOp({"command.create": {$exists: true}, waitingForLock: true});
+ return res.inprog.length == 1;
+});
- sleep(blockedMillis);
+sleep(blockedMillis);
- // Unlock the database. Sub-operation createCollection can proceed.
- db.fsyncUnlock();
+// Unlock the database. Sub-operation createCollection can proceed.
+db.fsyncUnlock();
- // Wait for sub-operation createIndex to get blocked after acquiring all the locks.
- let res;
- assert.soon(function() {
- res = db.currentOp(
- {"command.createIndexes": {$exists: true}, "lockStats.Global": {$exists: true}});
- return res.inprog.length == 1;
- });
- jsTestLog(tojson(res.inprog[0]));
- // Assert that sub-operation 'createIndex' has 0 lock wait time. Before SERVER-26854, it
- // erroneously reported `blockedMillis` as it counted the lock wait time for the previous
- // sub-operation.
- assert(!('timeAcquiringMicros' in res.inprog[0].lockStats.Global));
+// Wait for sub-operation createIndex to get blocked after acquiring all the locks.
+let res;
+assert.soon(function() {
+ res = db.currentOp(
+ {"command.createIndexes": {$exists: true}, "lockStats.Global": {$exists: true}});
+ return res.inprog.length == 1;
+});
+jsTestLog(tojson(res.inprog[0]));
+// Assert that sub-operation 'createIndex' has 0 lock wait time. Before SERVER-26854, it
+// erroneously reported `blockedMillis` as it counted the lock wait time for the previous
+// sub-operation.
+assert(!('timeAcquiringMicros' in res.inprog[0].lockStats.Global));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
- dollarOutAggregationShell();
- MongoRunner.stopMongod(conn);
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
+dollarOutAggregationShell();
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/lock_stats_suboperation_logs.js b/jstests/noPassthrough/lock_stats_suboperation_logs.js
index 3d533f1363d..cb56bfdc262 100644
--- a/jstests/noPassthrough/lock_stats_suboperation_logs.js
+++ b/jstests/noPassthrough/lock_stats_suboperation_logs.js
@@ -18,84 +18,84 @@
* @tags: [requires_fsync]
*/
(function() {
- 'use strict';
+'use strict';
- const conn = MongoRunner.runMongod();
- const db = conn.getDB('test');
- const coll = db.books;
- const blockedMillis = 2000;
- assert.commandWorked(coll.insert({title: 'Adventures of Huckleberry'}));
- assert.commandWorked(coll.insert({title: '1984'}));
- assert.commandWorked(coll.insert({title: 'Animal Farm'}));
- // The server will log every operation.
- db.setProfilingLevel(0, -1);
- // Create the output collection beforehand so that $out will execute a code path which triggers
- // the index creation sub-operation.
- db['favorite'].insert({foo: 1});
+const conn = MongoRunner.runMongod();
+const db = conn.getDB('test');
+const coll = db.books;
+const blockedMillis = 2000;
+assert.commandWorked(coll.insert({title: 'Adventures of Huckleberry'}));
+assert.commandWorked(coll.insert({title: '1984'}));
+assert.commandWorked(coll.insert({title: 'Animal Farm'}));
+// The server will log every operation.
+db.setProfilingLevel(0, -1);
+// Create the output collection beforehand so that $out will execute a code path which triggers
+// the index creation sub-operation.
+db['favorite'].insert({foo: 1});
- // Lock the database, and then start an operation that needs the lock, so it blocks.
- assert.commandWorked(db.fsyncLock());
+// Lock the database, and then start an operation that needs the lock, so it blocks.
+assert.commandWorked(db.fsyncLock());
- // Aggregation with $out which will block on creating the temporary collection due to the
- // FsyncLock.
- const dollarOutAggregationShell = startParallelShell(function() {
- // Simple aggregation which copies a document to the output collection.
- assert.commandWorked(db.runCommand({
- aggregate: 'books',
- pipeline: [{$match: {title: '1984'}}, {$out: 'favorite'}],
- cursor: {}
- }));
- }, conn.port);
+// Aggregation with $out which will block on creating the temporary collection due to the
+// FsyncLock.
+const dollarOutAggregationShell = startParallelShell(function() {
+ // Simple aggregation which copies a document to the output collection.
+ assert.commandWorked(db.runCommand({
+ aggregate: 'books',
+ pipeline: [{$match: {title: '1984'}}, {$out: 'favorite'}],
+ cursor: {}
+ }));
+}, conn.port);
- // Sub-operation createCollection starts to get blocked.
- assert.soon(function() {
- let res = db.currentOp({waitingForLock: true});
- return res.inprog.length == 1;
- });
+// Sub-operation createCollection starts to get blocked.
+assert.soon(function() {
+ let res = db.currentOp({waitingForLock: true});
+ return res.inprog.length == 1;
+});
- sleep(blockedMillis);
+sleep(blockedMillis);
- clearRawMongoProgramOutput();
- // Unlock the database. Sub-operation createCollection can proceed
- // and so do all the following sub-operations.
- db.fsyncUnlock();
+clearRawMongoProgramOutput();
+// Unlock the database. Sub-operation createCollection can proceed
+// and so do all the following sub-operations.
+db.fsyncUnlock();
- dollarOutAggregationShell();
- assert.eq(db['favorite'].count(), 1);
+dollarOutAggregationShell();
+assert.eq(db['favorite'].count(), 1);
- // Stopping the mongod also waits until all of its logs have been read by the mongo shell.
- MongoRunner.stopMongod(conn);
+// Stopping the mongod also waits until all of its logs have been read by the mongo shell.
+MongoRunner.stopMongod(conn);
- let mongodLogs = rawMongoProgramOutput();
- let lines = mongodLogs.split('\n');
- const lockWaitTimeRegex = /timeAcquiringMicros: { [wW]: ([0-9]+)/;
- let match;
- let firstOpWaitTime;
- let parentOpWaitTime;
- let numWaitedForLocks = 0;
+let mongodLogs = rawMongoProgramOutput();
+let lines = mongodLogs.split('\n');
+const lockWaitTimeRegex = /timeAcquiringMicros: { [wW]: ([0-9]+)/;
+let match;
+let firstOpWaitTime;
+let parentOpWaitTime;
+let numWaitedForLocks = 0;
- for (let line of lines) {
- if ((match = lockWaitTimeRegex.exec(line)) !== null) {
- let lockWaitTime = match[1];
- // Ignoring 'noise' lock stats from other operations such as locks taken during
- // validation stage.
- if (lockWaitTime < blockedMillis * 1000)
- continue;
- if (firstOpWaitTime === undefined)
- firstOpWaitTime = lockWaitTime;
- else
- parentOpWaitTime = lockWaitTime;
- numWaitedForLocks++;
- jsTestLog('Operation/Sub-operation log: ');
- jsTestLog(line);
- }
+for (let line of lines) {
+ if ((match = lockWaitTimeRegex.exec(line)) !== null) {
+ let lockWaitTime = match[1];
+        // Ignore 'noise' lock stats from other operations, such as locks taken during the
+        // validation stage.
+ if (lockWaitTime < blockedMillis * 1000)
+ continue;
+ if (firstOpWaitTime === undefined)
+ firstOpWaitTime = lockWaitTime;
+ else
+ parentOpWaitTime = lockWaitTime;
+ numWaitedForLocks++;
+ jsTestLog('Operation/Sub-operation log: ');
+ jsTestLog(line);
}
+}
- // Only the logs of 'parent command' (aggregation with $out) and the first
- // sub-operation(createCollection) have the information about the long wait for the lock.
- assert.eq(numWaitedForLocks, 2);
+// Only the logs of the 'parent command' (aggregation with $out) and the first
+// sub-operation (createCollection) have the information about the long wait for the lock.
+assert.eq(numWaitedForLocks, 2);
- // Total waiting time should be greater than or equal to the waiting time of the
- // first sub-operation.
- assert(parentOpWaitTime >= firstOpWaitTime);
+// Total waiting time should be greater than or equal to the waiting time of the
+// first sub-operation.
+assert(parentOpWaitTime >= firstOpWaitTime);
})();
diff --git a/jstests/noPassthrough/log_and_profile_query_hash.js b/jstests/noPassthrough/log_and_profile_query_hash.js
index 2a0757689a6..50395061e54 100644
--- a/jstests/noPassthrough/log_and_profile_query_hash.js
+++ b/jstests/noPassthrough/log_and_profile_query_hash.js
@@ -2,155 +2,161 @@
//
// Confirms that profiled find queries and corresponding logs have matching queryHashes.
(function() {
- "use strict";
-
- // For getLatestProfilerEntry().
- load("jstests/libs/profiler.js");
-
- // Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
- // of data logged for each op. For some of the testcases below, including the cluster time would
- // cause them to be truncated at the 512-byte RamLog limit, and some of the fields we need to
- // check would be lost.
- TestData.skipGossipingClusterTime = true;
-
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("jstests_query_shape_hash");
- const coll = testDB.test;
-
- const profileEntryFilter = {op: "query"};
-
- assert.commandWorked(testDB.setProfilingLevel(2, {"slowms": 0}));
- assert.commandWorked(testDB.setLogLevel(0, "query"));
-
- // Parses the logLine and profileEntry into similar string representations with no white spaces.
- // Returns true if the logLine command components correspond to the profile entry. This is
- // sufficient for the purpose of testing query hashes.
- function logMatchesEntry(logLine, profileEntry) {
- if (logLine.indexOf("command: find { find: \"test\"") >= 0 &&
- logLine.indexOf(profileEntry["command"]["comment"]) >= 0) {
- return true;
- }
- return false;
- }
-
- // Fetch the log line that corresponds to the profile entry. If there is no such line, return
- // null.
- function retrieveLogLine(log, profileEntry) {
- const logLine = log.reduce((acc, line) => {
- if (logMatchesEntry(line, profileEntry)) {
- // Assert that the matching does not pick up more than one line corresponding to
- // the entry.
- assert.eq(acc, null);
- return line;
- }
- return acc;
- }, null);
- return logLine;
+"use strict";
+
+// For getLatestProfilerEntry().
+load("jstests/libs/profiler.js");
+
+// Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
+// of data logged for each op. For some of the testcases below, including the cluster time would
+// cause them to be truncated at the 512-byte RamLog limit, and some of the fields we need to
+// check would be lost.
+TestData.skipGossipingClusterTime = true;
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+const testDB = conn.getDB("jstests_query_shape_hash");
+const coll = testDB.test;
+
+const profileEntryFilter = {
+ op: "query"
+};
+
+assert.commandWorked(testDB.setProfilingLevel(2, {"slowms": 0}));
+assert.commandWorked(testDB.setLogLevel(0, "query"));
+
+// Parses the logLine and profileEntry into similar string representations with no white spaces.
+// Returns true if the logLine command components correspond to the profile entry. This is
+// sufficient for the purpose of testing query hashes.
+function logMatchesEntry(logLine, profileEntry) {
+ if (logLine.indexOf("command: find { find: \"test\"") >= 0 &&
+ logLine.indexOf(profileEntry["command"]["comment"]) >= 0) {
+ return true;
}
-
- // Run the find command, retrieve the corresponding profile object and log line, then ensure
- // that both the profile object and log line have matching stable query hashes (if any).
- function runTestsAndGetHashes(db, {comment, test, hasQueryHash}) {
- assert.commandWorked(db.adminCommand({clearLog: "global"}));
- assert.doesNotThrow(() => test(db, comment));
- const log = assert.commandWorked(db.adminCommand({getLog: "global"})).log;
- const profileEntry =
- getLatestProfilerEntry(testDB, {op: "query", "command.comment": comment});
- // Parse the profile entry to retrieve the corresponding log entry.
- const logLine = retrieveLogLine(log, profileEntry);
- assert.neq(logLine, null);
-
- // Confirm that the query hashes either exist or don't exist in both log and profile
- // entries. If the queryHash and planCacheKey exist, ensure that the hashes from the
- // profile entry match the log line.
- assert.eq(hasQueryHash, profileEntry.hasOwnProperty("queryHash"));
- assert.eq(hasQueryHash, profileEntry.hasOwnProperty("planCacheKey"));
- assert.eq(hasQueryHash, (logLine.indexOf(profileEntry["queryHash"]) >= 0));
- assert.eq(hasQueryHash, (logLine.indexOf(profileEntry["planCacheKey"]) >= 0));
- if (hasQueryHash) {
- return {
- queryHash: profileEntry["queryHash"],
- planCacheKey: profileEntry["planCacheKey"]
- };
+ return false;
+}
+
+// Fetch the log line that corresponds to the profile entry. If there is no such line, return
+// null.
+function retrieveLogLine(log, profileEntry) {
+ const logLine = log.reduce((acc, line) => {
+ if (logMatchesEntry(line, profileEntry)) {
+ // Assert that the matching does not pick up more than one line corresponding to
+ // the entry.
+ assert.eq(acc, null);
+ return line;
}
- return null;
+ return acc;
+ }, null);
+ return logLine;
+}
+
+// Run the find command, retrieve the corresponding profile object and log line, then ensure
+// that both the profile object and log line have matching stable query hashes (if any).
+function runTestsAndGetHashes(db, {comment, test, hasQueryHash}) {
+ assert.commandWorked(db.adminCommand({clearLog: "global"}));
+ assert.doesNotThrow(() => test(db, comment));
+ const log = assert.commandWorked(db.adminCommand({getLog: "global"})).log;
+ const profileEntry = getLatestProfilerEntry(testDB, {op: "query", "command.comment": comment});
+ // Parse the profile entry to retrieve the corresponding log entry.
+ const logLine = retrieveLogLine(log, profileEntry);
+ assert.neq(logLine, null);
+
+ // Confirm that the query hashes either exist or don't exist in both log and profile
+ // entries. If the queryHash and planCacheKey exist, ensure that the hashes from the
+ // profile entry match the log line.
+ assert.eq(hasQueryHash, profileEntry.hasOwnProperty("queryHash"));
+ assert.eq(hasQueryHash, profileEntry.hasOwnProperty("planCacheKey"));
+ assert.eq(hasQueryHash, (logLine.indexOf(profileEntry["queryHash"]) >= 0));
+ assert.eq(hasQueryHash, (logLine.indexOf(profileEntry["planCacheKey"]) >= 0));
+ if (hasQueryHash) {
+ return {queryHash: profileEntry["queryHash"], planCacheKey: profileEntry["planCacheKey"]};
}
-
- // Add data and indices.
- const nDocs = 200;
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({a: i, b: -1, c: 1}));
- }
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
-
- const queryA = {a: {$gte: 3}, b: 32};
- const queryB = {a: {$gte: 199}, b: -1};
- const projectionB = {_id: 0, b: 1};
- const sortC = {c: -1};
-
- const testList = [
- {
- comment: "Test0 find query",
- test: function(db, comment) {
- assert.eq(200, db.test.find().comment(comment).itcount());
- },
- hasQueryHash: false
+ return null;
+}
+
+// Add data and indices.
+const nDocs = 200;
+for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({a: i, b: -1, c: 1}));
+}
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+
+const queryA = {
+ a: {$gte: 3},
+ b: 32
+};
+const queryB = {
+ a: {$gte: 199},
+ b: -1
+};
+const projectionB = {
+ _id: 0,
+ b: 1
+};
+const sortC = {
+ c: -1
+};
+
+const testList = [
+ {
+ comment: "Test0 find query",
+ test: function(db, comment) {
+ assert.eq(200, db.test.find().comment(comment).itcount());
},
- {
- comment: "Test1 find query",
- test: function(db, comment) {
- assert.eq(1,
- db.test.find(queryB, projectionB).sort(sortC).comment(comment).itcount(),
- 'unexpected document count');
- },
- hasQueryHash: true
+ hasQueryHash: false
+ },
+ {
+ comment: "Test1 find query",
+ test: function(db, comment) {
+ assert.eq(1,
+ db.test.find(queryB, projectionB).sort(sortC).comment(comment).itcount(),
+ 'unexpected document count');
},
- {
- comment: "Test2 find query",
- test: function(db, comment) {
- assert.eq(0,
- db.test.find(queryA, projectionB).sort(sortC).comment(comment).itcount(),
- 'unexpected document count');
- },
- hasQueryHash: true
- }
- ];
-
- const hashValues = testList.map((testCase) => runTestsAndGetHashes(testDB, testCase));
-
- // Confirm that the same shape of query has the same hashes.
- assert.neq(hashValues[0], hashValues[1]);
- assert.eq(hashValues[1], hashValues[2]);
-
- // Test that the expected 'planCacheKey' and 'queryHash' are included in the transitional
- // log lines when an inactive cache entry is created.
- assert.commandWorked(testDB.setLogLevel(1, "query"));
- const testInactiveCreationLog = {
- comment: "Test Creating inactive entry.",
+ hasQueryHash: true
+ },
+ {
+ comment: "Test2 find query",
test: function(db, comment) {
assert.eq(0,
- db.test.find({b: {$lt: 12}, a: {$eq: 500}})
- .sort({a: -1})
- .comment(comment)
- .itcount(),
+ db.test.find(queryA, projectionB).sort(sortC).comment(comment).itcount(),
'unexpected document count');
},
hasQueryHash: true
-
- };
- const onCreationHashes = runTestsAndGetHashes(testDB, testInactiveCreationLog);
- const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log;
-
- // Fetch the line that logs when an inactive cache entry is created for the query with
- // 'planCacheKey' and 'queryHash'. Confirm only one line does this.
- const creationLogList = log.filter(
- logLine =>
- (logLine.indexOf("Creating inactive cache entry for query shape query") != -1 &&
- logLine.indexOf("planCacheKey " + String(onCreationHashes.planCacheKey)) != -1 &&
- logLine.indexOf("queryHash " + String(onCreationHashes.queryHash)) != -1));
- assert.eq(1, creationLogList.length);
-
- MongoRunner.stopMongod(conn);
+ }
+];
+
+const hashValues = testList.map((testCase) => runTestsAndGetHashes(testDB, testCase));
+
+// Confirm that the same shape of query has the same hashes.
+assert.neq(hashValues[0], hashValues[1]);
+assert.eq(hashValues[1], hashValues[2]);
+
+// Test that the expected 'planCacheKey' and 'queryHash' are included in the transitional
+// log lines when an inactive cache entry is created.
+assert.commandWorked(testDB.setLogLevel(1, "query"));
+const testInactiveCreationLog = {
+ comment: "Test Creating inactive entry.",
+ test: function(db, comment) {
+ assert.eq(
+ 0,
+ db.test.find({b: {$lt: 12}, a: {$eq: 500}}).sort({a: -1}).comment(comment).itcount(),
+ 'unexpected document count');
+ },
+ hasQueryHash: true
+
+};
+const onCreationHashes = runTestsAndGetHashes(testDB, testInactiveCreationLog);
+const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log;
+
+// Fetch the line that logs when an inactive cache entry is created for the query with
+// 'planCacheKey' and 'queryHash'. Confirm only one line does this.
+const creationLogList = log.filter(
+ logLine => (logLine.indexOf("Creating inactive cache entry for query shape query") != -1 &&
+ logLine.indexOf("planCacheKey " + String(onCreationHashes.planCacheKey)) != -1 &&
+ logLine.indexOf("queryHash " + String(onCreationHashes.queryHash)) != -1));
+assert.eq(1, creationLogList.length);
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/log_find_getmore.js b/jstests/noPassthrough/log_find_getmore.js
index 94447948632..dc7f6d83c91 100644
--- a/jstests/noPassthrough/log_find_getmore.js
+++ b/jstests/noPassthrough/log_find_getmore.js
@@ -4,168 +4,166 @@
* @tags: [requires_profiling]
*/
(function() {
- "use strict";
+"use strict";
- // For checkLog and getLatestProfilerEntry.
- load("jstests/libs/check_log.js");
- load("jstests/libs/profiler.js");
+// For checkLog and getLatestProfilerEntry.
+load("jstests/libs/check_log.js");
+load("jstests/libs/profiler.js");
- function assertLogLineContains(conn, parts) {
- if (typeof(parts) == 'string') {
- return assertLogLineContains(conn, [parts]);
- }
- assert.soon(function() {
- const logLines = checkLog.getGlobalLog(conn);
- let foundAll = false;
- for (let l = 0; l < logLines.length && !foundAll; l++) {
- for (let p = 0; p < parts.length; p++) {
- if (logLines[l].indexOf(parts[p]) == -1) {
- break;
- }
- foundAll = (p == parts.length - 1);
+function assertLogLineContains(conn, parts) {
+ if (typeof (parts) == 'string') {
+ return assertLogLineContains(conn, [parts]);
+ }
+ assert.soon(function() {
+ const logLines = checkLog.getGlobalLog(conn);
+ let foundAll = false;
+ for (let l = 0; l < logLines.length && !foundAll; l++) {
+ for (let p = 0; p < parts.length; p++) {
+ if (logLines[l].indexOf(parts[p]) == -1) {
+ break;
}
+ foundAll = (p == parts.length - 1);
}
- return foundAll;
- }, "failed to find log line containing all of " + tojson(parts));
- print("FOUND: " + tojsononeline(parts));
- }
+ }
+ return foundAll;
+ }, "failed to find log line containing all of " + tojson(parts));
+ print("FOUND: " + tojsononeline(parts));
+}
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("log_getmore");
- const coll = testDB.test;
+const testDB = conn.getDB("log_getmore");
+const coll = testDB.test;
- assert.commandWorked(testDB.dropDatabase());
+assert.commandWorked(testDB.dropDatabase());
- for (let i = 1; i <= 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
+for (let i = 1; i <= 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
- assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
- // Set the diagnostic logging threshold to capture all operations, and enable profiling so that
- // we can easily retrieve cursor IDs in all cases.
- assert.commandWorked(testDB.setProfilingLevel(2, -1));
+// Set the diagnostic logging threshold to capture all operations, and enable profiling so that
+// we can easily retrieve cursor IDs in all cases.
+assert.commandWorked(testDB.setProfilingLevel(2, -1));
- //
- // Command tests.
- //
- testDB.getMongo().forceReadMode("commands");
+//
+// Command tests.
+//
+testDB.getMongo().forceReadMode("commands");
- // TEST: Verify the log format of the find command.
- let cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).skip(1).limit(10).hint({a: 1}).batchSize(5);
- cursor.next(); // Perform initial query and retrieve first document in batch.
+// TEST: Verify the log format of the find command.
+let cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).skip(1).limit(10).hint({a: 1}).batchSize(5);
+cursor.next(); // Perform initial query and retrieve first document in batch.
- let cursorid = getLatestProfilerEntry(testDB).cursorid;
+let cursorid = getLatestProfilerEntry(testDB).cursorid;
- let logLine =
- 'command log_getmore.test appName: "MongoDB Shell" command: find { find: "test", filter:' +
- ' { a: { $gt: 0.0 } }, skip: 1.0, batchSize: 5.0, limit: 10.0, singleBatch: false, sort:' +
- ' { a: 1.0 }, hint: { a: 1.0 }';
+let logLine =
+ 'command log_getmore.test appName: "MongoDB Shell" command: find { find: "test", filter:' +
+ ' { a: { $gt: 0.0 } }, skip: 1.0, batchSize: 5.0, limit: 10.0, singleBatch: false, sort:' +
+ ' { a: 1.0 }, hint: { a: 1.0 }';
- // Check the logs to verify that find appears as above.
- assertLogLineContains(conn, logLine);
+// Check the logs to verify that find appears as above.
+assertLogLineContains(conn, logLine);
- // TEST: Verify the log format of a getMore command following a find command.
+// TEST: Verify the log format of a getMore command following a find command.
- assert.eq(cursor.itcount(), 8); // Iterate the cursor established above to trigger getMore.
+assert.eq(cursor.itcount(), 8); // Iterate the cursor established above to trigger getMore.
- /**
- * Be sure to avoid rounding errors when converting a cursor ID to a string, since converting a
- * NumberLong to a string may not preserve all digits.
- */
- function cursorIdToString(cursorId) {
- let cursorIdString = cursorId.toString();
- if (cursorIdString.indexOf("NumberLong") === -1) {
- return cursorIdString;
- }
- return cursorIdString.substring("NumberLong(\"".length,
- cursorIdString.length - "\")".length);
+/**
+ * Be sure to avoid rounding errors when converting a cursor ID to a string, since converting a
+ * NumberLong to a string may not preserve all digits.
+ */
+function cursorIdToString(cursorId) {
+ let cursorIdString = cursorId.toString();
+ if (cursorIdString.indexOf("NumberLong") === -1) {
+ return cursorIdString;
}
+ return cursorIdString.substring("NumberLong(\"".length, cursorIdString.length - "\")".length);
+}
- logLine = [
- 'command log_getmore.test appName: "MongoDB Shell" command: getMore { getMore: ' +
- cursorIdToString(cursorid) + ', collection: "test", batchSize: 5.0',
- 'originatingCommand: { find: "test", ' +
- 'filter: { a: { $gt: 0.0 } }, skip: 1.0, batchSize: 5.0, limit: 10.0, singleBatch: ' +
- 'false, sort: { a: 1.0 }, hint: { a: 1.0 }'
- ];
+logLine = [
+ 'command log_getmore.test appName: "MongoDB Shell" command: getMore { getMore: ' +
+ cursorIdToString(cursorid) + ', collection: "test", batchSize: 5.0',
+ 'originatingCommand: { find: "test", ' +
+ 'filter: { a: { $gt: 0.0 } }, skip: 1.0, batchSize: 5.0, limit: 10.0, singleBatch: ' +
+ 'false, sort: { a: 1.0 }, hint: { a: 1.0 }'
+];
- assertLogLineContains(conn, logLine);
+assertLogLineContains(conn, logLine);
- // TEST: Verify the log format of a getMore command following an aggregation.
- cursor = coll.aggregate([{$match: {a: {$gt: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
- cursorid = getLatestProfilerEntry(testDB).cursorid;
+// TEST: Verify the log format of a getMore command following an aggregation.
+cursor = coll.aggregate([{$match: {a: {$gt: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
+cursorid = getLatestProfilerEntry(testDB).cursorid;
- assert.eq(cursor.itcount(), 10);
+assert.eq(cursor.itcount(), 10);
- logLine = [
- 'command log_getmore.test appName: "MongoDB Shell" command: getMore { getMore: ' +
- cursorIdToString(cursorid) + ', collection: "test"',
- 'originatingCommand: { aggregate: "test", pipeline: ' +
- '[ { $match: { a: { $gt: 0.0 } } } ], cursor: { batchSize: 0.0 }, hint: { a: 1.0 }'
- ];
+logLine = [
+ 'command log_getmore.test appName: "MongoDB Shell" command: getMore { getMore: ' +
+ cursorIdToString(cursorid) + ', collection: "test"',
+ 'originatingCommand: { aggregate: "test", pipeline: ' +
+ '[ { $match: { a: { $gt: 0.0 } } } ], cursor: { batchSize: 0.0 }, hint: { a: 1.0 }'
+];
- assertLogLineContains(conn, logLine);
+assertLogLineContains(conn, logLine);
- //
- // Legacy tests.
- //
- testDB.getMongo().forceReadMode("legacy");
+//
+// Legacy tests.
+//
+testDB.getMongo().forceReadMode("legacy");
- // TEST: Verify the log format of a legacy find. This should be upconverted to resemble a find
- // command.
- cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).skip(1).limit(10).hint({a: 1}).batchSize(5);
- cursor.next();
+// TEST: Verify the log format of a legacy find. This should be upconverted to resemble a find
+// command.
+cursor = coll.find({a: {$gt: 0}}).sort({a: 1}).skip(1).limit(10).hint({a: 1}).batchSize(5);
+cursor.next();
- cursorid = getLatestProfilerEntry(testDB).cursorid;
+cursorid = getLatestProfilerEntry(testDB).cursorid;
- logLine =
- 'query log_getmore.test appName: "MongoDB Shell" command: { find: "test", filter: { a: ' +
- '{ $gt: 0.0 } }, skip: 1, ntoreturn: 5, sort: { a: 1.0 }, hint: { a: 1.0 }';
+logLine = 'query log_getmore.test appName: "MongoDB Shell" command: { find: "test", filter: { a: ' +
+ '{ $gt: 0.0 } }, skip: 1, ntoreturn: 5, sort: { a: 1.0 }, hint: { a: 1.0 }';
- assertLogLineContains(conn, logLine);
+assertLogLineContains(conn, logLine);
- // TEST: Verify that a query whose filter contains a field named 'query' appears as expected in
- // the logs. This test ensures that upconverting a legacy query correctly identifies this as a
- // user field rather than a wrapped filter spec.
- coll.find({query: "foo"}).itcount();
+// TEST: Verify that a query whose filter contains a field named 'query' appears as expected in
+// the logs. This test ensures that upconverting a legacy query correctly identifies this as a
+// user field rather than a wrapped filter spec.
+coll.find({query: "foo"}).itcount();
- logLine =
- 'query log_getmore.test appName: "MongoDB Shell" command: { find: "test", filter: { query:' +
- ' "foo" } }';
+logLine =
+ 'query log_getmore.test appName: "MongoDB Shell" command: { find: "test", filter: { query:' +
+ ' "foo" } }';
- assertLogLineContains(conn, logLine);
+assertLogLineContains(conn, logLine);
- // TEST: Verify that a legacy getMore following a find is logged in the expected format. This
- // should be upconverted to resemble a getMore command, with the preceding upconverted legacy
- // find in the originatingCommand field.
+// TEST: Verify that a legacy getMore following a find is logged in the expected format. This
+// should be upconverted to resemble a getMore command, with the preceding upconverted legacy
+// find in the originatingCommand field.
- assert.eq(cursor.itcount(), 8); // Iterate the cursor established above to trigger getMore.
+assert.eq(cursor.itcount(), 8); // Iterate the cursor established above to trigger getMore.
- logLine = 'getmore log_getmore.test appName: "MongoDB Shell" command: { getMore: ' +
- cursorIdToString(cursorid) +
- ', collection: "test", batchSize: 5 } originatingCommand: { find: "test", filter: { a: {' +
- ' $gt: 0.0 } }, skip: 1, ntoreturn: 5, sort: { a: 1.0 }, hint: { a: 1.0 }';
+logLine = 'getmore log_getmore.test appName: "MongoDB Shell" command: { getMore: ' +
+ cursorIdToString(cursorid) +
+ ', collection: "test", batchSize: 5 } originatingCommand: { find: "test", filter: { a: {' +
+ ' $gt: 0.0 } }, skip: 1, ntoreturn: 5, sort: { a: 1.0 }, hint: { a: 1.0 }';
- assertLogLineContains(conn, logLine);
+assertLogLineContains(conn, logLine);
- // TEST: Verify that a legacy getMore following an aggregation is logged in the expected format.
- // This should be upconverted to resemble a getMore command, with the preceding aggregation in
- // the originatingCommand field.
- cursor = coll.aggregate([{$match: {a: {$gt: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
- cursorid = getLatestProfilerEntry(testDB).cursorid;
+// TEST: Verify that a legacy getMore following an aggregation is logged in the expected format.
+// This should be upconverted to resemble a getMore command, with the preceding aggregation in
+// the originatingCommand field.
+cursor = coll.aggregate([{$match: {a: {$gt: 0}}}], {cursor: {batchSize: 0}, hint: {a: 1}});
+cursorid = getLatestProfilerEntry(testDB).cursorid;
- assert.eq(cursor.itcount(), 10);
+assert.eq(cursor.itcount(), 10);
- logLine = [
- 'getmore log_getmore.test appName: "MongoDB Shell" command: { getMore: ' +
- cursorIdToString(cursorid) + ', collection: "test", batchSize: 0',
- 'originatingCommand: { aggregate: "test", pipeline:' +
- ' [ { $match: { a: { $gt: 0.0 } } } ], cursor: { batchSize: 0.0 }, hint: { a: 1.0 }'
- ];
+logLine = [
+ 'getmore log_getmore.test appName: "MongoDB Shell" command: { getMore: ' +
+ cursorIdToString(cursorid) + ', collection: "test", batchSize: 0',
+ 'originatingCommand: { aggregate: "test", pipeline:' +
+ ' [ { $match: { a: { $gt: 0.0 } } } ], cursor: { batchSize: 0.0 }, hint: { a: 1.0 }'
+];
- assertLogLineContains(conn, logLine);
- MongoRunner.stopMongod(conn);
+assertLogLineContains(conn, logLine);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
index 27a06a1ecec..0b4cda5794e 100644
--- a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
+++ b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
@@ -5,252 +5,253 @@
* @tags: [requires_replication, requires_sharding]
*/
(function() {
- "use strict";
-
- // This test looks for exact matches in log output, which does not account for implicit
- // sessions.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/libs/check_log.js"); // For formatAsLogLine.
-
- // Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
- // of data logged for each op. For some of the testcases below, including the cluster time would
- // cause them to be truncated at the 512-byte RamLog limit, and some of the fields we need to
- // check would be lost.
- TestData.skipGossipingClusterTime = true;
-
- // Set up a 2-shard single-node replicaset cluster.
- const stParams = {name: jsTestName(), shards: 2, rs: {nodes: 1}};
- const st = new ShardingTest(stParams);
-
- // Obtain one mongoS connection and a second direct to the shard.
- const shardConn = st.rs0.getPrimary();
- const mongosConn = st.s;
-
- const dbName = "logtest";
-
- const mongosDB = mongosConn.getDB(dbName);
- const shardDB = shardConn.getDB(dbName);
-
- // Enable sharding on the the test database and ensure that the primary is on shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), shardConn.name);
-
- // Drops and re-shards the test collection, then splits at {_id: 0} and moves the upper chunk to
- // the second shard.
- function dropAndRecreateTestCollection() {
- assert(mongosDB.test.drop());
- st.shardColl(mongosDB.test, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName(), true);
- }
-
- // Configures logging parameters on the target environment, constructs a list of test operations
- // depending on the deployment type, runs each of these in turn, and searches the logs for the
- // corresponding output. Returns a pair of arrays [testsRun, logLines]; the former is the set of
- // test cases that were run, while the latter contains the logline for each test, or null if no
- // such logline was found.
- function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
- dropAndRecreateTestCollection();
-
- const coll = db.test;
-
- // Transparently handles assert.writeOK for legacy writes.
- function assertWriteOK(writeResponse) {
- if (!writeResponse) {
- assert(db.getMongo().writeMode !== "commands");
- assert(db.runCommand({getLastError: 1}).err == null);
- } else {
- assert.commandWorked(writeResponse);
- }
+"use strict";
+
+// This test looks for exact matches in log output, which does not account for implicit
+// sessions.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/check_log.js"); // For formatAsLogLine.
+
+// Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
+// of data logged for each op. For some of the test cases below, including the cluster time would
+// cause them to be truncated at the 512-byte RamLog limit, and some of the fields we need to
+// check would be lost.
+TestData.skipGossipingClusterTime = true;
+
+// Set up a 2-shard single-node replicaset cluster.
+const stParams = {
+ name: jsTestName(),
+ shards: 2,
+ rs: {nodes: 1}
+};
+const st = new ShardingTest(stParams);
+
+// Obtain one mongoS connection and a second direct to the shard.
+const shardConn = st.rs0.getPrimary();
+const mongosConn = st.s;
+
+const dbName = "logtest";
+
+const mongosDB = mongosConn.getDB(dbName);
+const shardDB = shardConn.getDB(dbName);
+
+// Enable sharding on the test database and ensure that the primary is on shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), shardConn.name);
+
+// Drops and re-shards the test collection, then splits at {_id: 0} and moves the upper chunk to
+// the second shard.
+function dropAndRecreateTestCollection() {
+ assert(mongosDB.test.drop());
+ st.shardColl(mongosDB.test, {_id: 1}, {_id: 0}, {_id: 1}, mongosDB.getName(), true);
+}
+
+// Configures logging parameters on the target environment, constructs a list of test operations
+// depending on the deployment type, runs each of these in turn, and searches the logs for the
+// corresponding output. Returns a pair of arrays [testsRun, logLines]; the former is the set of
+// test cases that were run, while the latter contains the logline for each test, or null if no
+// such logline was found.
+function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
+ dropAndRecreateTestCollection();
+
+ const coll = db.test;
+
+ // Transparently handles assert.writeOK for legacy writes.
+ function assertWriteOK(writeResponse) {
+ if (!writeResponse) {
+ assert(db.getMongo().writeMode !== "commands");
+ assert(db.runCommand({getLastError: 1}).err == null);
+ } else {
+ assert.commandWorked(writeResponse);
}
+ }
- for (let i = 1; i <= 5; ++i) {
- assertWriteOK(coll.insert({_id: i, a: i, loc: {type: "Point", coordinates: [i, i]}}));
- assertWriteOK(
- coll.insert({_id: -i, a: -i, loc: {type: "Point", coordinates: [-i, -i]}}));
- }
- assertWriteOK(coll.createIndex({loc: "2dsphere"}));
-
- const isMongos = FixtureHelpers.isMongos(db);
-
- // Set the shell read/write mode.
- db.getMongo().forceWriteMode(readWriteMode);
- db.getMongo().forceReadMode(readWriteMode);
-
- // Build a string that identifies the parameters of this test run. Individual ops will
- // use this string as their comment where applicable, and we also print it to the logs.
- const logFormatTestComment = (isMongos ? 'mongos' : 'mongod') + "_" + readWriteMode +
- "_slowms:" + slowMs + "_logLevel:" + logLevel + "_sampleRate:" + sampleRate;
- jsTestLog(logFormatTestComment);
-
- // Set all logging parameters. If slowMs is null, we set a high threshold here so that
- // logLevel can be tested in cases where operations should not otherwise be logged.
- assert.commandWorked(db.adminCommand(
- {profile: 0, slowms: (slowMs == null) ? 1000000 : slowMs, sampleRate: sampleRate}));
- assert.commandWorked(db.setLogLevel(logLevel, "command"));
- assert.commandWorked(db.setLogLevel(logLevel, "write"));
-
- // Certain fields in the log lines on mongoD are not applicable in their counterparts on
- // mongoS, and vice-versa. Ignore these fields when examining the logs of an instance on
- // which we do not expect them to appear.
- const ignoreFields =
+ for (let i = 1; i <= 5; ++i) {
+ assertWriteOK(coll.insert({_id: i, a: i, loc: {type: "Point", coordinates: [i, i]}}));
+ assertWriteOK(coll.insert({_id: -i, a: -i, loc: {type: "Point", coordinates: [-i, -i]}}));
+ }
+ assertWriteOK(coll.createIndex({loc: "2dsphere"}));
+
+ const isMongos = FixtureHelpers.isMongos(db);
+
+ // Set the shell read/write mode.
+ db.getMongo().forceWriteMode(readWriteMode);
+ db.getMongo().forceReadMode(readWriteMode);
+
+ // Build a string that identifies the parameters of this test run. Individual ops will
+ // use this string as their comment where applicable, and we also print it to the logs.
+ const logFormatTestComment = (isMongos ? 'mongos' : 'mongod') + "_" + readWriteMode +
+ "_slowms:" + slowMs + "_logLevel:" + logLevel + "_sampleRate:" + sampleRate;
+ jsTestLog(logFormatTestComment);
+
+ // Set all logging parameters. If slowMs is null, we set a high threshold here so that
+ // logLevel can be tested in cases where operations should not otherwise be logged.
+ assert.commandWorked(db.adminCommand(
+ {profile: 0, slowms: (slowMs == null) ? 1000000 : slowMs, sampleRate: sampleRate}));
+ assert.commandWorked(db.setLogLevel(logLevel, "command"));
+ assert.commandWorked(db.setLogLevel(logLevel, "write"));
+
+ // Certain fields in the log lines on mongoD are not applicable in their counterparts on
+ // mongoS, and vice-versa. Ignore these fields when examining the logs of an instance on
+ // which we do not expect them to appear.
+ const ignoreFields =
(isMongos
? ["docsExamined", "keysExamined", "keysInserted", "keysDeleted", "planSummary",
"usedDisk", "hasSortStage"]
: ["nShards"]);
- // Legacy operations do not produce a 'command: <name>' field in the log.
- if (readWriteMode === "legacy") {
- ignoreFields.push("command");
- }
+ // Legacy operations do not produce a 'command: <name>' field in the log.
+ if (readWriteMode === "legacy") {
+ ignoreFields.push("command");
+ }
- function confirmLogContents(db, {test, logFields}, testIndex) {
- // Clear the log before running the test, to guarantee that we do not match against any
- // similar tests which may have run previously.
- assert.commandWorked(db.adminCommand({clearLog: "global"}));
+ function confirmLogContents(db, {test, logFields}, testIndex) {
+ // Clear the log before running the test, to guarantee that we do not match against any
+ // similar tests which may have run previously.
+ assert.commandWorked(db.adminCommand({clearLog: "global"}));
- // Run the given command in order to generate a log line. If slowMs is non-null and
- // greater than 0, apply that slowMs to every second test.
- if (slowMs != null && slowMs > 0) {
- db.adminCommand({profile: 0, slowms: (testIndex % 2 ? slowMs : -1)});
- }
- assert.doesNotThrow(() => test(db));
-
- // Confirm whether the operation was logged or not.
- const globalLog = assert.commandWorked(db.adminCommand({getLog: "global"}));
- return findMatchingLogLine(globalLog.log, logFields, ignoreFields);
+ // Run the given command in order to generate a log line. If slowMs is non-null and
+ // greater than 0, apply that slowMs to every second test.
+ if (slowMs != null && slowMs > 0) {
+ db.adminCommand({profile: 0, slowms: (testIndex % 2 ? slowMs : -1)});
}
+ assert.doesNotThrow(() => test(db));
+
+ // Confirm whether the operation was logged or not.
+ const globalLog = assert.commandWorked(db.adminCommand({getLog: "global"}));
+ return findMatchingLogLine(globalLog.log, logFields, ignoreFields);
+ }
- //
- // Defines the set of test operations and associated log output fields.
- //
- const testList = [
- {
- test: function(db) {
- assert.eq(db.test
- .aggregate([{$match: {a: 1}}], {
- comment: logFormatTestComment,
- collation: {locale: "fr"},
- hint: {_id: 1},
- })
- .itcount(),
- 1);
- },
- logFields: {
- command: "aggregate",
- aggregate: coll.getName(),
- pipeline: [{$match: {a: 1}}],
- comment: logFormatTestComment,
- collation: {locale: "fr"},
- hint: {_id: 1},
- planSummary: "IXSCAN { _id: 1 }",
- cursorExhausted: 1,
- docsExamined: 10,
- keysExamined: 10,
- nreturned: 1,
- nShards: stParams.shards
- }
+ //
+ // Defines the set of test operations and associated log output fields.
+ //
+ const testList = [
+ {
+ test: function(db) {
+ assert.eq(db.test
+ .aggregate([{$match: {a: 1}}], {
+ comment: logFormatTestComment,
+ collation: {locale: "fr"},
+ hint: {_id: 1},
+ })
+ .itcount(),
+ 1);
},
- {
- test: function(db) {
- assert.eq(db.test.find({a: 1, $comment: logFormatTestComment})
- .collation({locale: "fr"})
- .count(),
- 1);
- },
- logFields: {
- command: "count",
- count: coll.getName(),
- query: {a: 1, $comment: logFormatTestComment},
- collation: {locale: "fr"},
- planSummary: "COLLSCAN"
- }
+ logFields: {
+ command: "aggregate",
+ aggregate: coll.getName(),
+ pipeline: [{$match: {a: 1}}],
+ comment: logFormatTestComment,
+ collation: {locale: "fr"},
+ hint: {_id: 1},
+ planSummary: "IXSCAN { _id: 1 }",
+ cursorExhausted: 1,
+ docsExamined: 10,
+ keysExamined: 10,
+ nreturned: 1,
+ nShards: stParams.shards
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(db.test.find({a: 1, $comment: logFormatTestComment})
+ .collation({locale: "fr"})
+ .count(),
+ 1);
},
- {
- test: function(db) {
- assert.eq(
- db.test.distinct(
- "a", {a: 1, $comment: logFormatTestComment}, {collation: {locale: "fr"}}),
- [1]);
- },
- logFields: {
- command: "distinct",
- distinct: coll.getName(),
- query: {a: 1, $comment: logFormatTestComment},
- planSummary: "COLLSCAN",
- $comment: logFormatTestComment,
- collation: {locale: "fr"}
- }
+ logFields: {
+ command: "count",
+ count: coll.getName(),
+ query: {a: 1, $comment: logFormatTestComment},
+ collation: {locale: "fr"},
+ planSummary: "COLLSCAN"
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(
+ db.test.distinct(
+ "a", {a: 1, $comment: logFormatTestComment}, {collation: {locale: "fr"}}),
+ [1]);
},
- {
- test: function(db) {
- assert.eq(db.test.find({_id: 1}).comment(logFormatTestComment).itcount(), 1);
- },
- logFields: {
- command: "find",
- find: coll.getName(),
- comment: logFormatTestComment,
- planSummary: "IDHACK",
- cursorExhausted: 1,
- keysExamined: 1,
- docsExamined: 1,
- nreturned: 1,
- nShards: 1
- }
+ logFields: {
+ command: "distinct",
+ distinct: coll.getName(),
+ query: {a: 1, $comment: logFormatTestComment},
+ planSummary: "COLLSCAN",
+ $comment: logFormatTestComment,
+ collation: {locale: "fr"}
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(db.test.find({_id: 1}).comment(logFormatTestComment).itcount(), 1);
},
- {
- test: function(db) {
- assert.eq(db.test.findAndModify({
- query: {_id: 1, a: 1, $comment: logFormatTestComment},
- update: {$inc: {b: 1}},
- collation: {locale: "fr"}
- }),
- {_id: 1, a: 1, loc: {type: "Point", coordinates: [1, 1]}});
- },
- // TODO SERVER-34208: display FAM update metrics in mongoS logs.
- logFields: Object.assign((isMongos ? {} : {nMatched: 1, nModified: 1}), {
- command: "findAndModify",
- findandmodify: coll.getName(),
- planSummary: "IXSCAN { _id: 1 }",
- keysExamined: 1,
- docsExamined: 1,
- $comment: logFormatTestComment,
- collation: {locale: "fr"}
- })
+ logFields: {
+ command: "find",
+ find: coll.getName(),
+ comment: logFormatTestComment,
+ planSummary: "IDHACK",
+ cursorExhausted: 1,
+ keysExamined: 1,
+ docsExamined: 1,
+ nreturned: 1,
+ nShards: 1
+ }
+ },
+ {
+ test: function(db) {
+ assert.eq(db.test.findAndModify({
+ query: {_id: 1, a: 1, $comment: logFormatTestComment},
+ update: {$inc: {b: 1}},
+ collation: {locale: "fr"}
+ }),
+ {_id: 1, a: 1, loc: {type: "Point", coordinates: [1, 1]}});
},
- {
- test: function(db) {
- assert.commandWorked(db.test.mapReduce(() => {},
- (a, b) => {},
- {
- query: {$comment: logFormatTestComment},
- out: {inline: 1},
- }));
- },
- logFields: {
- command: "mapReduce",
- mapreduce: coll.getName(),
- planSummary: "COLLSCAN",
- keysExamined: 0,
- docsExamined: 10,
- $comment: logFormatTestComment,
- out: {inline: 1}
- }
+ // TODO SERVER-34208: display FAM update metrics in mongoS logs.
+ logFields: Object.assign((isMongos ? {} : {nMatched: 1, nModified: 1}), {
+ command: "findAndModify",
+ findandmodify: coll.getName(),
+ planSummary: "IXSCAN { _id: 1 }",
+ keysExamined: 1,
+ docsExamined: 1,
+ $comment: logFormatTestComment,
+ collation: {locale: "fr"}
+ })
+ },
+ {
+ test: function(db) {
+ assert.commandWorked(db.test.mapReduce(() => {}, (a, b) => {}, {
+ query: {$comment: logFormatTestComment},
+ out: {inline: 1},
+ }));
},
- {
- test: function(db) {
- assertWriteOK(db.test.update(
- {a: 1, $comment: logFormatTestComment}, {$inc: {b: 1}}, {multi: true}));
- },
- logFields: (isMongos ? {
- command: "update",
- update: coll.getName(),
- ordered: true,
- nMatched: 1,
- nModified: 1,
- nShards: stParams.shards
- }
- : {
+ logFields: {
+ command: "mapReduce",
+ mapreduce: coll.getName(),
+ planSummary: "COLLSCAN",
+ keysExamined: 0,
+ docsExamined: 10,
+ $comment: logFormatTestComment,
+ out: {inline: 1}
+ }
+ },
+ {
+ test: function(db) {
+ assertWriteOK(db.test.update(
+ {a: 1, $comment: logFormatTestComment}, {$inc: {b: 1}}, {multi: true}));
+ },
+ logFields: (isMongos ? {
+ command: "update",
+ update: coll.getName(),
+ ordered: true,
+ nMatched: 1,
+ nModified: 1,
+ nShards: stParams.shards
+ }
+ : {
q: {a: 1, $comment: logFormatTestComment},
u: {$inc: {b: 1}},
multi: true,
@@ -259,24 +260,24 @@
docsExamined: 10,
nMatched: 1,
nModified: 1
- })
+ })
+ },
+ {
+ test: function(db) {
+ assertWriteOK(db.test.update({_id: 100, $comment: logFormatTestComment},
+ {$inc: {b: 1}},
+ {multi: true, upsert: true}));
},
- {
- test: function(db) {
- assertWriteOK(db.test.update({_id: 100, $comment: logFormatTestComment},
- {$inc: {b: 1}},
- {multi: true, upsert: true}));
- },
- logFields: (isMongos ? {
- command: "update",
- update: coll.getName(),
- ordered: true,
- nMatched: 0,
- nModified: 0,
- upsert: 1,
- nShards: 1
- }
- : {
+ logFields: (isMongos ? {
+ command: "update",
+ update: coll.getName(),
+ ordered: true,
+ nMatched: 0,
+ nModified: 0,
+ upsert: 1,
+ nShards: 1
+ }
+ : {
q: {_id: 100, $comment: logFormatTestComment},
u: {$inc: {b: 1}},
multi: true,
@@ -286,32 +287,32 @@
nMatched: 0,
nModified: 0,
upsert: 1
- })
+ })
+ },
+ {
+ test: function(db) {
+ assertWriteOK(db.test.insert({z: 1, comment: logFormatTestComment}));
},
- {
- test: function(db) {
- assertWriteOK(db.test.insert({z: 1, comment: logFormatTestComment}));
- },
- logFields: {
- command: "insert",
- insert: `${coll.getName()}|${coll.getFullName()}`,
- keysInserted: 1,
- ninserted: 1,
- nShards: 1
- }
+ logFields: {
+ command: "insert",
+ insert: `${coll.getName()}|${coll.getFullName()}`,
+ keysInserted: 1,
+ ninserted: 1,
+ nShards: 1
+ }
+ },
+ {
+ test: function(db) {
+ assertWriteOK(db.test.remove({z: 1, $comment: logFormatTestComment}));
},
- {
- test: function(db) {
- assertWriteOK(db.test.remove({z: 1, $comment: logFormatTestComment}));
- },
- logFields: (isMongos ? {
- command: "delete",
- delete: coll.getName(),
- ordered: true,
- ndeleted: 1,
- nShards: stParams.shards
- }
- : {
+ logFields: (isMongos ? {
+ command: "delete",
+ delete: coll.getName(),
+ ordered: true,
+ ndeleted: 1,
+ nShards: stParams.shards
+ }
+ : {
q: {z: 1, $comment: logFormatTestComment},
limit: 0,
planSummary: "COLLSCAN",
@@ -319,195 +320,183 @@
docsExamined: 12,
ndeleted: 1,
keysDeleted: 1
- })
+ })
+ },
+ {
+ test: function(db) {
+ const originalSortBytes = db.adminCommand(
+ {getParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 1});
+ assert.commandWorked(originalSortBytes);
+ assert.commandWorked(db.adminCommand(
+ {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
+ assert.eq(coll.aggregate([{$match: {a: 1}}, {$sort: {a: 1}}], {allowDiskUse: true})
+ .itcount(),
+ 1);
+ assert.commandWorked(db.adminCommand({
+ setParameter: 1,
+ internalDocumentSourceSortMaxBlockingSortBytes:
+ originalSortBytes.internalDocumentSourceSortMaxBlockingSortBytes
+ }));
},
- {
- test: function(db) {
- const originalSortBytes = db.adminCommand(
- {getParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 1});
- assert.commandWorked(originalSortBytes);
- assert.commandWorked(db.adminCommand(
- {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
- assert.eq(
- coll.aggregate([{$match: {a: 1}}, {$sort: {a: 1}}], {allowDiskUse: true})
- .itcount(),
- 1);
- assert.commandWorked(db.adminCommand({
- setParameter: 1,
- internalDocumentSourceSortMaxBlockingSortBytes:
- originalSortBytes.internalDocumentSourceSortMaxBlockingSortBytes
- }));
- },
- logFields:
- {command: "aggregate", aggregate: coll.getName(), hasSortStage: 1, usedDisk: 1}
- }
- ];
-
- // Confirm log contains collation for find command.
- if (readWriteMode === "commands") {
- testList.push({
- test: function(db) {
- assert.eq(db.test.find({_id: {$in: [1, 5]}})
- .comment(logFormatTestComment)
- .collation({locale: "fr"})
- .itcount(),
- 2);
- },
- logFields: {
- command: "find",
- find: coll.getName(),
- planSummary: "IXSCAN { _id: 1 }",
- comment: logFormatTestComment,
- collation: {locale: "fr"},
- cursorExhausted: 1,
- keysExamined: 4,
- docsExamined: 2,
- nreturned: 2,
- nShards: 1
- }
- });
+ logFields:
+ {command: "aggregate", aggregate: coll.getName(), hasSortStage: 1, usedDisk: 1}
}
-
- // Confirm log content for getMore on both find and aggregate cursors.
- const originatingCommands = {
- find: {find: coll.getName(), batchSize: 0},
- aggregate: {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}
- };
-
- for (let cmdName in originatingCommands) {
- const cmdObj = originatingCommands[cmdName];
- const cmdRes = assert.commandWorked(db.runCommand(cmdObj));
-
- testList.push({
- test: function(db) {
- const cursor = new DBCommandCursor(db, cmdRes);
- assert.eq(cursor.itcount(), 11);
- },
- logFields: Object.assign({getMore: cmdRes.cursor.id}, cmdObj, {
- cursorid: cmdRes.cursor.id,
- planSummary: "COLLSCAN",
- cursorExhausted: 1,
- docsExamined: 11,
- keysExamined: 0,
- nreturned: 11,
- nShards: stParams.shards
- })
- });
- }
-
- // Run each of the test in the array, recording the log line found for each.
- const logLines =
- testList.map((testCase, arrIndex) => confirmLogContents(db, testCase, arrIndex));
-
- return [testList, logLines];
+ ];
+
+ // Confirm log contains collation for find command.
+ if (readWriteMode === "commands") {
+ testList.push({
+ test: function(db) {
+ assert.eq(db.test.find({_id: {$in: [1, 5]}})
+ .comment(logFormatTestComment)
+ .collation({locale: "fr"})
+ .itcount(),
+ 2);
+ },
+ logFields: {
+ command: "find",
+ find: coll.getName(),
+ planSummary: "IXSCAN { _id: 1 }",
+ comment: logFormatTestComment,
+ collation: {locale: "fr"},
+ cursorExhausted: 1,
+ keysExamined: 4,
+ docsExamined: 2,
+ nreturned: 2,
+ nShards: 1
+ }
+ });
}
- //
- // Helper functions.
- //
+ // Confirm log content for getMore on both find and aggregate cursors.
+ const originatingCommands = {
+ find: {find: coll.getName(), batchSize: 0},
+ aggregate: {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 0}}
+ };
- // Finds and returns a logline containing all the specified fields, or null if no such logline
- // was found. The regex escape function used here is drawn from the following:
- // https://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript
- // https://github.com/ljharb/regexp.escape
- function findMatchingLogLine(logLines, fields, ignoreFields) {
- function escapeRegex(input) {
- return (typeof input === "string"
- ? input.replace(/[\^\$\\\.\*\+\?\(\)\[\]\{\}]/g, '\\$&')
- : input);
- }
- function lineMatches(line, fields, ignoreFields) {
- const fieldNames =
- Object.keys(fields).filter((fieldName) => !ignoreFields.includes(fieldName));
- return fieldNames.every((fieldName) => {
- const fieldValue = fields[fieldName];
- let regex = escapeRegex(fieldName) + ":? ?(" +
- escapeRegex(checkLog.formatAsLogLine(fieldValue)) + "|" +
- escapeRegex(checkLog.formatAsLogLine(fieldValue, true)) + ")";
- const match = line.match(regex);
- return match && match[0];
- });
- }
+ for (let cmdName in originatingCommands) {
+ const cmdObj = originatingCommands[cmdName];
+ const cmdRes = assert.commandWorked(db.runCommand(cmdObj));
- for (let line of logLines) {
- if (lineMatches(line, fields, ignoreFields)) {
- return line;
- }
- }
- return null;
+ testList.push({
+ test: function(db) {
+ const cursor = new DBCommandCursor(db, cmdRes);
+ assert.eq(cursor.itcount(), 11);
+ },
+ logFields: Object.assign({getMore: cmdRes.cursor.id}, cmdObj, {
+ cursorid: cmdRes.cursor.id,
+ planSummary: "COLLSCAN",
+ cursorExhausted: 1,
+ docsExamined: 11,
+ keysExamined: 0,
+ nreturned: 11,
+ nShards: stParams.shards
+ })
+ });
}
- // In cases where some tests were not logged, this helper will identify and return them.
- function getUnloggedTests(testsRun, logLines) {
- return testsRun.filter((testCase, arrIndex) => !logLines[arrIndex]);
+    // Run each of the tests in the array, recording the log line found for each.
+ const logLines =
+ testList.map((testCase, arrIndex) => confirmLogContents(db, testCase, arrIndex));
+
+ return [testList, logLines];
+}
+
+//
+// Helper functions.
+//
+
+// Finds and returns a logline containing all the specified fields, or null if no such logline
+// was found. The regex escape function used here is drawn from the following:
+// https://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript
+// https://github.com/ljharb/regexp.escape
+function findMatchingLogLine(logLines, fields, ignoreFields) {
+ function escapeRegex(input) {
+ return (typeof input === "string" ? input.replace(/[\^\$\\\.\*\+\?\(\)\[\]\{\}]/g, '\\$&')
+ : input);
+ }
+ function lineMatches(line, fields, ignoreFields) {
+ const fieldNames =
+ Object.keys(fields).filter((fieldName) => !ignoreFields.includes(fieldName));
+ return fieldNames.every((fieldName) => {
+ const fieldValue = fields[fieldName];
+ let regex = escapeRegex(fieldName) + ":? ?(" +
+ escapeRegex(checkLog.formatAsLogLine(fieldValue)) + "|" +
+ escapeRegex(checkLog.formatAsLogLine(fieldValue, true)) + ")";
+ const match = line.match(regex);
+ return match && match[0];
+ });
}
- //
- // Test cases for varying values of logLevel, slowms, and sampleRate.
- //
-
- for (let testDB of[shardDB, mongosDB]) {
- for (let readWriteMode of["commands", "legacy"]) {
- // Test that all operations are logged when slowMs is < 0 and sampleRate is 1 at the
- // default logLevel.
- let [testsRun, logLines] = runLoggingTests({
- db: testDB,
- readWriteMode: readWriteMode,
- slowMs: -1,
- logLevel: 0,
- sampleRate: 1.0
- });
- let unlogged = getUnloggedTests(testsRun, logLines);
- assert.eq(unlogged.length, 0, () => tojson(unlogged));
-
- // Test that only some operations are logged when sampleRate is < 1 at the default
- // logLevel, even when slowMs is < 0. The actual sample rate is probabilistic, and may
- // therefore vary quite significantly from 0.5. However, we have already established
- // that with sampleRate 1 *all* ops are logged, so here it is sufficient to confirm that
- // some ops are not. We repeat the test 5 times to minimize the odds of failure.
- let sampleRateTestsRun = 0, sampleRateTestsLogged = 0;
- for (let i = 0; i < 5; i++) {
- [testsRun, logLines] = runLoggingTests({
- db: testDB,
- readWriteMode: readWriteMode,
- slowMs: -1,
- logLevel: 0,
- sampleRate: 0.5
- });
- unlogged = getUnloggedTests(testsRun, logLines);
- sampleRateTestsLogged += (testsRun.length - unlogged.length);
- sampleRateTestsRun += testsRun.length;
- }
- assert.betweenEx(0, sampleRateTestsLogged, sampleRateTestsRun);
-
- // Test that only operations which exceed slowMs are logged when slowMs > 0 and
- // sampleRate is 1, at the default logLevel. The given value of slowMs will be applied
- // to every second op in the test, so only half of the ops should be logged.
+ for (let line of logLines) {
+ if (lineMatches(line, fields, ignoreFields)) {
+ return line;
+ }
+ }
+ return null;
+}
+
+// In cases where some tests were not logged, this helper will identify and return them.
+function getUnloggedTests(testsRun, logLines) {
+ return testsRun.filter((testCase, arrIndex) => !logLines[arrIndex]);
+}
+
+//
+// Test cases for varying values of logLevel, slowms, and sampleRate.
+//
+
+for (let testDB of [shardDB, mongosDB]) {
+ for (let readWriteMode of ["commands", "legacy"]) {
+ // Test that all operations are logged when slowMs is < 0 and sampleRate is 1 at the
+ // default logLevel.
+ let [testsRun, logLines] = runLoggingTests(
+ {db: testDB, readWriteMode: readWriteMode, slowMs: -1, logLevel: 0, sampleRate: 1.0});
+ let unlogged = getUnloggedTests(testsRun, logLines);
+ assert.eq(unlogged.length, 0, () => tojson(unlogged));
+
+ // Test that only some operations are logged when sampleRate is < 1 at the default
+ // logLevel, even when slowMs is < 0. The actual sample rate is probabilistic, and may
+ // therefore vary quite significantly from 0.5. However, we have already established
+ // that with sampleRate 1 *all* ops are logged, so here it is sufficient to confirm that
+ // some ops are not. We repeat the test 5 times to minimize the odds of failure.
+ let sampleRateTestsRun = 0, sampleRateTestsLogged = 0;
+ for (let i = 0; i < 5; i++) {
[testsRun, logLines] = runLoggingTests({
db: testDB,
readWriteMode: readWriteMode,
- slowMs: 1000000,
+ slowMs: -1,
logLevel: 0,
- sampleRate: 1.0
- });
- unlogged = getUnloggedTests(testsRun, logLines);
- assert.eq(unlogged.length, Math.floor(testsRun.length / 2), () => tojson(unlogged));
-
- // Test that all operations are logged when logLevel is 1, regardless of sampleRate and
- // slowMs. We pass 'null' for slowMs to signify that a high threshold should be set
- // (such that, at logLevel 0, no operations would be logged) and that this value should
- // be applied for all operations, rather than for every second op as in the case of the
- // slowMs test.
- [testsRun, logLines] = runLoggingTests({
- db: testDB,
- readWriteMode: readWriteMode,
- slowMs: null,
- logLevel: 1,
sampleRate: 0.5
});
unlogged = getUnloggedTests(testsRun, logLines);
- assert.eq(unlogged.length, 0, () => tojson(unlogged));
+ sampleRateTestsLogged += (testsRun.length - unlogged.length);
+ sampleRateTestsRun += testsRun.length;
}
+ assert.betweenEx(0, sampleRateTestsLogged, sampleRateTestsRun);
+
+ // Test that only operations which exceed slowMs are logged when slowMs > 0 and
+ // sampleRate is 1, at the default logLevel. The given value of slowMs will be applied
+ // to every second op in the test, so only half of the ops should be logged.
+ [testsRun, logLines] = runLoggingTests({
+ db: testDB,
+ readWriteMode: readWriteMode,
+ slowMs: 1000000,
+ logLevel: 0,
+ sampleRate: 1.0
+ });
+ unlogged = getUnloggedTests(testsRun, logLines);
+ assert.eq(unlogged.length, Math.floor(testsRun.length / 2), () => tojson(unlogged));
+
+ // Test that all operations are logged when logLevel is 1, regardless of sampleRate and
+ // slowMs. We pass 'null' for slowMs to signify that a high threshold should be set
+ // (such that, at logLevel 0, no operations would be logged) and that this value should
+ // be applied for all operations, rather than for every second op as in the case of the
+ // slowMs test.
+ [testsRun, logLines] = runLoggingTests(
+ {db: testDB, readWriteMode: readWriteMode, slowMs: null, logLevel: 1, sampleRate: 0.5});
+ unlogged = getUnloggedTests(testsRun, logLines);
+ assert.eq(unlogged.length, 0, () => tojson(unlogged));
}
- st.stop();
+}
+st.stop();
})();
diff --git a/jstests/noPassthrough/logical_session_cache_find_getmore.js b/jstests/noPassthrough/logical_session_cache_find_getmore.js
index a005b1c0ef5..4857443d032 100644
--- a/jstests/noPassthrough/logical_session_cache_find_getmore.js
+++ b/jstests/noPassthrough/logical_session_cache_find_getmore.js
@@ -1,28 +1,28 @@
(function() {
- 'use strict';
+'use strict';
- TestData.disableImplicitSessions = true;
+TestData.disableImplicitSessions = true;
- var conn = MongoRunner.runMongod({setParameter: {maxSessions: 2}});
- var testDB = conn.getDB("test");
+var conn = MongoRunner.runMongod({setParameter: {maxSessions: 2}});
+var testDB = conn.getDB("test");
- assert.writeOK(testDB.foo.insert({data: 1}));
- assert.writeOK(testDB.foo.insert({data: 2}));
+assert.writeOK(testDB.foo.insert({data: 1}));
+assert.writeOK(testDB.foo.insert({data: 2}));
- for (var i = 0; i < 2; i++) {
- var session = conn.startSession();
- var db = session.getDatabase("test");
- var res = assert.commandWorked(db.runCommand({find: "foo", batchSize: 1}),
- "unable to run find when the cache is not full");
- var cursorId = res.cursor.id;
- assert.commandWorked(db.runCommand({getMore: cursorId, collection: "foo"}),
- "unable to run getMore when the cache is not full");
- }
+for (var i = 0; i < 2; i++) {
+ var session = conn.startSession();
+ var db = session.getDatabase("test");
+ var res = assert.commandWorked(db.runCommand({find: "foo", batchSize: 1}),
+ "unable to run find when the cache is not full");
+ var cursorId = res.cursor.id;
+ assert.commandWorked(db.runCommand({getMore: cursorId, collection: "foo"}),
+ "unable to run getMore when the cache is not full");
+}
- var session3 = conn.startSession();
- var db = session3.getDatabase("test");
- assert.commandFailed(db.runCommand({find: "foo", batchSize: 1}),
- "able to run find when the cache is full");
+var session3 = conn.startSession();
+var db = session3.getDatabase("test");
+assert.commandFailed(db.runCommand({find: "foo", batchSize: 1}),
+ "able to run find when the cache is full");
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/logical_session_cursor_checks.js b/jstests/noPassthrough/logical_session_cursor_checks.js
index a12f46fc583..5664fb1ef28 100644
--- a/jstests/noPassthrough/logical_session_cursor_checks.js
+++ b/jstests/noPassthrough/logical_session_cursor_checks.js
@@ -1,97 +1,97 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
-
- function runFixture(Fixture) {
- var fixture = new Fixture();
- var conn = fixture.getConn();
- var admin = conn.getDB("admin");
- var data = conn.getDB("data_storage");
-
- admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
- admin.auth("admin", "admin");
- data.createUser({user: 'admin', pwd: 'admin', roles: jsTest.basicUserRoles});
- data.createUser({user: 'user0', pwd: 'password', roles: jsTest.basicUserRoles});
- admin.logout();
-
- data.auth("user0", "password");
- assert.writeOK(data.test.insert({name: "first", data: 1}));
- assert.writeOK(data.test.insert({name: "second", data: 2}));
-
- // Test that getMore works correctly on the same session.
- {
- var session1 = conn.startSession();
- var session2 = conn.startSession();
- var res = assert.commandWorked(
- session1.getDatabase("data_storage").runCommand({find: "test", batchSize: 0}));
- var cursorId = res.cursor.id;
- assert.commandWorked(session1.getDatabase("data_storage")
- .runCommand({getMore: cursorId, collection: "test"}));
-
- session2.endSession();
- session1.endSession();
- }
-
- // Test that getMore correctly gives an error, when using a cursor on a different session.
- {
- var session1 = conn.startSession();
- var session2 = conn.startSession();
- var res = assert.commandWorked(
- session1.getDatabase("data_storage").runCommand({find: "test", batchSize: 0}));
- var cursorId = res.cursor.id;
- assert.commandFailed(session2.getDatabase("data_storage")
- .runCommand({getMore: cursorId, collection: "test"}));
-
- session2.endSession();
- session1.endSession();
- }
-
- // Test that query.js driven getMore works correctly on the same session.
- {
- var session1 = conn.startSession();
- var session2 = conn.startSession();
- var cursor = session1.getDatabase("data_storage").test.find({}).batchSize(1);
- cursor.next();
- cursor.next();
- cursor.close();
-
- session2.endSession();
- session1.endSession();
- }
-
- fixture.stop();
+'use strict';
+
+function runFixture(Fixture) {
+ var fixture = new Fixture();
+ var conn = fixture.getConn();
+ var admin = conn.getDB("admin");
+ var data = conn.getDB("data_storage");
+
+ admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
+ admin.auth("admin", "admin");
+ data.createUser({user: 'admin', pwd: 'admin', roles: jsTest.basicUserRoles});
+ data.createUser({user: 'user0', pwd: 'password', roles: jsTest.basicUserRoles});
+ admin.logout();
+
+ data.auth("user0", "password");
+ assert.writeOK(data.test.insert({name: "first", data: 1}));
+ assert.writeOK(data.test.insert({name: "second", data: 2}));
+
+ // Test that getMore works correctly on the same session.
+ {
+ var session1 = conn.startSession();
+ var session2 = conn.startSession();
+ var res = assert.commandWorked(
+ session1.getDatabase("data_storage").runCommand({find: "test", batchSize: 0}));
+ var cursorId = res.cursor.id;
+ assert.commandWorked(session1.getDatabase("data_storage")
+ .runCommand({getMore: cursorId, collection: "test"}));
+
+ session2.endSession();
+ session1.endSession();
}
- function Standalone() {
- this.standalone = MongoRunner.runMongod({auth: "", nojournal: ""});
+ // Test that getMore correctly gives an error, when using a cursor on a different session.
+ {
+ var session1 = conn.startSession();
+ var session2 = conn.startSession();
+ var res = assert.commandWorked(
+ session1.getDatabase("data_storage").runCommand({find: "test", batchSize: 0}));
+ var cursorId = res.cursor.id;
+ assert.commandFailed(session2.getDatabase("data_storage")
+ .runCommand({getMore: cursorId, collection: "test"}));
+
+ session2.endSession();
+ session1.endSession();
}
- Standalone.prototype.stop = function() {
- MongoRunner.stopMongod(this.standalone);
- };
-
- Standalone.prototype.getConn = function() {
- return this.standalone;
- };
-
- function Sharding() {
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- this.st = new ShardingTest({
- shards: 1,
- config: 1,
- mongos: 1,
- other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
- });
+ // Test that query.js driven getMore works correctly on the same session.
+ {
+ var session1 = conn.startSession();
+ var session2 = conn.startSession();
+ var cursor = session1.getDatabase("data_storage").test.find({}).batchSize(1);
+ cursor.next();
+ cursor.next();
+ cursor.close();
+
+ session2.endSession();
+ session1.endSession();
}
- Sharding.prototype.stop = function() {
- this.st.stop();
- };
+ fixture.stop();
+}
- Sharding.prototype.getConn = function() {
- return this.st.s0;
- };
+function Standalone() {
+ this.standalone = MongoRunner.runMongod({auth: "", nojournal: ""});
+}
- [Standalone, Sharding].forEach(runFixture);
+Standalone.prototype.stop = function() {
+ MongoRunner.stopMongod(this.standalone);
+};
+
+Standalone.prototype.getConn = function() {
+ return this.standalone;
+};
+
+function Sharding() {
+ // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+ this.st = new ShardingTest({
+ shards: 1,
+ config: 1,
+ mongos: 1,
+ other: {keyFile: 'jstests/libs/key1', shardAsReplicaSet: false}
+ });
+}
+
+Sharding.prototype.stop = function() {
+ this.st.stop();
+};
+
+Sharding.prototype.getConn = function() {
+ return this.st.s0;
+};
+
+[Standalone, Sharding].forEach(runFixture);
})();
diff --git a/jstests/noPassthrough/loglong.js b/jstests/noPassthrough/loglong.js
index 9e3aa577f3b..db60b5f1745 100644
--- a/jstests/noPassthrough/loglong.js
+++ b/jstests/noPassthrough/loglong.js
@@ -2,53 +2,55 @@
// make sure very long long lines get truncated
(function() {
- "use strict";
+"use strict";
- const options = {setParameter: "maxLogSizeKB=9"};
- const conn = MongoRunner.runMongod(options);
+const options = {
+ setParameter: "maxLogSizeKB=9"
+};
+const conn = MongoRunner.runMongod(options);
- var db = conn.getDB('db');
- var res = db.adminCommand({getParameter: 1, maxLogSizeKB: 1});
- assert.eq(9, res.maxLogSizeKB);
+var db = conn.getDB('db');
+var res = db.adminCommand({getParameter: 1, maxLogSizeKB: 1});
+assert.eq(9, res.maxLogSizeKB);
- var t = db.loglong;
- t.drop();
+var t = db.loglong;
+t.drop();
- t.insert({x: 1});
+t.insert({x: 1});
- var n = 0;
- var query = {x: []};
- while (Object.bsonsize(query) < 30000) {
- query.x.push(n++);
- }
+var n = 0;
+var query = {x: []};
+while (Object.bsonsize(query) < 30000) {
+ query.x.push(n++);
+}
- assertLogTruncated(db, t, 9);
+assertLogTruncated(db, t, 9);
- var res = db.adminCommand({setParameter: 1, maxLogSizeKB: 8});
- assert.eq(res.ok, 1);
+var res = db.adminCommand({setParameter: 1, maxLogSizeKB: 8});
+assert.eq(res.ok, 1);
- assertLogTruncated(db, t, 8);
+assertLogTruncated(db, t, 8);
- function assertLogTruncated(db, t, maxLogSize) {
- var before = db.adminCommand({setParameter: 1, logLevel: 1});
+function assertLogTruncated(db, t, maxLogSize) {
+ var before = db.adminCommand({setParameter: 1, logLevel: 1});
- t.findOne(query);
+ t.findOne(query);
- var x = db.adminCommand({setParameter: 1, logLevel: before.was});
- assert.eq(1, x.was, tojson(x));
+ var x = db.adminCommand({setParameter: 1, logLevel: before.was});
+ assert.eq(1, x.was, tojson(x));
- var log = db.adminCommand({getLog: "global"}).log;
+ var log = db.adminCommand({getLog: "global"}).log;
- var found = false;
- var toFind = "warning: log line attempted (16kB) over max size (" + maxLogSize + "kB)";
- for (var i = log.length - 1; i >= 0; i--) {
- if (log[i].indexOf(toFind) >= 0) {
- found = true;
- break;
- }
+ var found = false;
+ var toFind = "warning: log line attempted (16kB) over max size (" + maxLogSize + "kB)";
+ for (var i = log.length - 1; i >= 0; i--) {
+ if (log[i].indexOf(toFind) >= 0) {
+ found = true;
+ break;
}
-
- assert(found, tojson(log));
}
- MongoRunner.stopMongod(conn);
+
+ assert(found, tojson(log));
+}
+MongoRunner.stopMongod(conn);
})(); \ No newline at end of file
diff --git a/jstests/noPassthrough/lookup_max_intermediate_size.js b/jstests/noPassthrough/lookup_max_intermediate_size.js
index 33f9976c058..378a4498afb 100644
--- a/jstests/noPassthrough/lookup_max_intermediate_size.js
+++ b/jstests/noPassthrough/lookup_max_intermediate_size.js
@@ -5,107 +5,106 @@
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
(function() {
- "use strict";
-
- // Used by testPipeline to sort result documents. All _ids must be primitives.
- function compareId(a, b) {
- if (a._id < b._id) {
- return -1;
- }
- if (a._id > b._id) {
- return 1;
- }
- return 0;
- }
+"use strict";
- // Helper for testing that pipeline returns correct set of results.
- function testPipeline(pipeline, expectedResult, collection) {
- assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
- expectedResult.sort(compareId));
+// Used by testPipeline to sort result documents. All _ids must be primitives.
+function compareId(a, b) {
+ if (a._id < b._id) {
+ return -1;
+ }
+ if (a._id > b._id) {
+ return 1;
+ }
+ return 0;
+}
+
+// Helper for testing that pipeline returns correct set of results.
+function testPipeline(pipeline, expectedResult, collection) {
+ assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
+ expectedResult.sort(compareId));
+}
+
+function runTest(coll, from) {
+ const db = null; // Using the db variable is banned in this function.
+
+ //
+ // Confirm aggregation will not fail if intermediate $lookup stage exceeds 16 MB.
+ //
+ assert.commandWorked(coll.insert([
+ {"_id": 3, "same": 1},
+ ]));
+
+ const bigString = new Array(1025).toString();
+ const doc = {_id: new ObjectId(), x: bigString, same: 1};
+ const docSize = Object.bsonsize(doc);
+
+ // Number of documents in lookup to exceed maximum BSON document size.
+ // Using 20 MB instead to be safe.
+ let numDocs = Math.floor(20 * 1024 * 1024 / docSize);
+
+ let bulk = from.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({x: bigString, same: 1});
}
+ assert.commandWorked(bulk.execute());
- function runTest(coll, from) {
- const db = null; // Using the db variable is banned in this function.
+ let pipeline = [
+ {$lookup: {from: "from", localField: "same", foreignField: "same", as: "arr20mb"}},
+ {$project: {_id: 1}}
+ ];
- //
- // Confirm aggregation will not fail if intermediate $lookup stage exceeds 16 MB.
- //
- assert.commandWorked(coll.insert([
- {"_id": 3, "same": 1},
- ]));
+ let expectedResults = [{_id: 3}];
- const bigString = new Array(1025).toString();
- const doc = {_id: new ObjectId(), x: bigString, same: 1};
- const docSize = Object.bsonsize(doc);
+ testPipeline(pipeline, expectedResults, coll);
- // Number of documents in lookup to exceed maximum BSON document size.
- // Using 20 MB instead to be safe.
- let numDocs = Math.floor(20 * 1024 * 1024 / docSize);
+ //
+ // Confirm aggregation will fail if intermediate $lookup stage exceeds
+ // internalLookupStageIntermediateDocumentMaxSizeBytes, set to 30 MB.
+ //
- let bulk = from.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({x: bigString, same: 1});
- }
- assert.commandWorked(bulk.execute());
+ // Number of documents to exceed maximum intermediate $lookup stage document size.
+ // Using 35 MB total to be safe (20 MB from previous test + 15 MB).
+ numDocs = Math.floor(15 * 1024 * 1024 / docSize);
- let pipeline = [
- {$lookup: {from: "from", localField: "same", foreignField: "same", as: "arr20mb"}},
- {$project: {_id: 1}}
- ];
+ bulk = from.initializeUnorderedBulkOp();
+ for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({x: bigString, same: 1});
+ }
+ assert.commandWorked(bulk.execute());
- let expectedResults = [{_id: 3}];
+ pipeline = [
+ {$lookup: {from: "from", localField: "same", foreignField: "same", as: "arr35mb"}},
+ {$project: {_id: 1}}
+ ];
- testPipeline(pipeline, expectedResults, coll);
+ assertErrorCode(coll, pipeline, 4568);
+}
- //
- // Confirm aggregation will fail if intermediate $lookup stage exceeds
- // internalLookupStageIntermediateDocumentMaxSizeBytes, set to 30 MB.
- //
+// Run tests on single node.
+const standalone = MongoRunner.runMongod();
+const db = standalone.getDB("test");
- // Number of documents to exceed maximum intermediate $lookup stage document size.
- // Using 35 MB total to be safe (20 MB from previous test + 15 MB).
- numDocs = Math.floor(15 * 1024 * 1024 / docSize);
+assert.commandWorked(db.adminCommand(
+ {setParameter: 1, internalLookupStageIntermediateDocumentMaxSizeBytes: 30 * 1024 * 1024}));
- bulk = from.initializeUnorderedBulkOp();
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({x: bigString, same: 1});
- }
- assert.commandWorked(bulk.execute());
+runTest(db.lookUp, db.from);
- pipeline = [
- {$lookup: {from: "from", localField: "same", foreignField: "same", as: "arr35mb"}},
- {$project: {_id: 1}}
- ];
+MongoRunner.stopMongod(standalone);
- assertErrorCode(coll, pipeline, 4568);
+// Run tests in a sharded environment.
+const sharded = new ShardingTest({
+ mongos: 1,
+ shards: 2,
+ rs: {
+ nodes: 1,
+ setParameter: {internalLookupStageIntermediateDocumentMaxSizeBytes: 30 * 1024 * 1024}
}
+});
- // Run tests on single node.
- const standalone = MongoRunner.runMongod();
- const db = standalone.getDB("test");
-
- assert.commandWorked(db.adminCommand(
- {setParameter: 1, internalLookupStageIntermediateDocumentMaxSizeBytes: 30 * 1024 * 1024}));
-
- runTest(db.lookUp, db.from);
-
- MongoRunner.stopMongod(standalone);
-
- // Run tests in a sharded environment.
- const sharded = new ShardingTest({
- mongos: 1,
- shards: 2,
- rs: {
- nodes: 1,
- setParameter:
- {internalLookupStageIntermediateDocumentMaxSizeBytes: 30 * 1024 * 1024}
- }
- });
-
- assert(sharded.adminCommand({enableSharding: "test"}));
+assert(sharded.adminCommand({enableSharding: "test"}));
- assert(sharded.adminCommand({shardCollection: "test.lookUp", key: {_id: 'hashed'}}));
- runTest(sharded.getDB('test').lookUp, sharded.getDB('test').from);
+assert(sharded.adminCommand({shardCollection: "test.lookUp", key: {_id: 'hashed'}}));
+runTest(sharded.getDB('test').lookUp, sharded.getDB('test').from);
- sharded.stop();
+sharded.stop();
}());
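The sizing arithmetic used by runTest() above can be checked interactively before running the test. A minimal sketch, assuming only a running mongod and a mongo shell; the variable names mirror the test but are otherwise illustrative:

    // Estimate how many ~1 KB documents are needed to push a $lookup result
    // array past a 20 MB budget, mirroring the arithmetic in runTest().
    const bigString = new Array(1025).toString();    // 1024 commas, ~1 KB
    const sample = {_id: new ObjectId(), x: bigString, same: 1};
    const docSize = Object.bsonsize(sample);         // BSON size of one document
    const numDocs = Math.floor(20 * 1024 * 1024 / docSize);
    print("need " + numDocs + " docs of " + docSize + " bytes each");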
diff --git a/jstests/noPassthrough/low_js_heap_limit.js b/jstests/noPassthrough/low_js_heap_limit.js
index 7ef5d99d583..a50072b5bf4 100644
--- a/jstests/noPassthrough/low_js_heap_limit.js
+++ b/jstests/noPassthrough/low_js_heap_limit.js
@@ -1,18 +1,18 @@
// SERVER-26596 This tests that you can set a very low heap limit for javascript, and that it will
// fail to run any javascript, but won't crash the server.
(function() {
- 'use strict';
+'use strict';
- const conn = MongoRunner.runMongod();
- var db = conn.getDB('db');
+const conn = MongoRunner.runMongod();
+var db = conn.getDB('db');
- assert.commandWorked(db.adminCommand({setParameter: 1, jsHeapLimitMB: 1}));
+assert.commandWorked(db.adminCommand({setParameter: 1, jsHeapLimitMB: 1}));
- db.foo.insert({x: 1});
- const e = assert.throws(() => db.foo.findOne({$where: 'sleep(10000);'}));
- assert.eq(e.code, ErrorCodes.ExceededMemoryLimit);
+db.foo.insert({x: 1});
+const e = assert.throws(() => db.foo.findOne({$where: 'sleep(10000);'}));
+assert.eq(e.code, ErrorCodes.ExceededMemoryLimit);
- var returnCode = runProgram("mongo", "--jsHeapLimitMB=1", "--nodb", "--eval='exit();'");
- assert.eq(returnCode, 1);
- MongoRunner.stopMongod(conn);
+var returnCode = runProgram("mongo", "--jsHeapLimitMB=1", "--nodb", "--eval='exit();'");
+assert.eq(returnCode, 1);
+MongoRunner.stopMongod(conn);
}());
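The heap-limit test above relies on jsHeapLimitMB being settable at runtime. A minimal sketch for confirming the value took effect, assuming jsHeapLimitMB is also readable through getParameter:

    // Set the limit, then read it back to confirm it was applied.
    assert.commandWorked(db.adminCommand({setParameter: 1, jsHeapLimitMB: 1}));
    const res = assert.commandWorked(db.adminCommand({getParameter: 1, jsHeapLimitMB: 1}));
    print("jsHeapLimitMB is now " + res.jsHeapLimitMB);  // expected: 1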
diff --git a/jstests/noPassthrough/match_expression_optimization_failpoint.js b/jstests/noPassthrough/match_expression_optimization_failpoint.js
index 9b30b41a767..590102ba8e8 100644
--- a/jstests/noPassthrough/match_expression_optimization_failpoint.js
+++ b/jstests/noPassthrough/match_expression_optimization_failpoint.js
@@ -1,42 +1,42 @@
// Tests that match expression optimization works properly when the failpoint isn't triggered, and
// is disabled properly when it is triggered.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For aggPlan functions.
- Random.setRandomSeed();
+load("jstests/libs/analyze_plan.js"); // For aggPlan functions.
+Random.setRandomSeed();
- const conn = MongoRunner.runMongod({});
- assert.neq(conn, null, "Mongod failed to start up.");
- const testDb = conn.getDB("test");
- const coll = testDb.agg_opt;
+const conn = MongoRunner.runMongod({});
+assert.neq(conn, null, "Mongod failed to start up.");
+const testDb = conn.getDB("test");
+const coll = testDb.agg_opt;
- const kTestZip = 44100;
- for (let i = 0; i < 25; ++i) {
- assert.commandWorked(coll.insert(
- {_id: kTestZip + i, city: "Cleveland", pop: Random.randInt(100000), state: "OH"}));
- }
+const kTestZip = 44100;
+for (let i = 0; i < 25; ++i) {
+ assert.commandWorked(coll.insert(
+ {_id: kTestZip + i, city: "Cleveland", pop: Random.randInt(100000), state: "OH"}));
+}
- const pipeline = [{$match: {_id: {$in: [kTestZip]}}}, {$sort: {_id: 1}}];
+const pipeline = [{$match: {_id: {$in: [kTestZip]}}}, {$sort: {_id: 1}}];
- const enabledPlan = coll.explain().aggregate(pipeline);
- // Test that a single equality condition $in was optimized to an $eq.
- assert.eq(enabledPlan.queryPlanner.parsedQuery._id.$eq, kTestZip);
+const enabledPlan = coll.explain().aggregate(pipeline);
+// Test that a single equality condition $in was optimized to an $eq.
+assert.eq(enabledPlan.queryPlanner.parsedQuery._id.$eq, kTestZip);
- const enabledResult = coll.aggregate(pipeline).toArray();
+const enabledResult = coll.aggregate(pipeline).toArray();
- // Enable a failpoint that will cause match expression optimizations to be skipped.
- assert.commandWorked(testDb.adminCommand(
- {configureFailPoint: "disableMatchExpressionOptimization", mode: "alwaysOn"}));
+// Enable a failpoint that will cause match expression optimizations to be skipped.
+assert.commandWorked(testDb.adminCommand(
+ {configureFailPoint: "disableMatchExpressionOptimization", mode: "alwaysOn"}));
- const disabledPlan = coll.explain().aggregate(pipeline);
- // Test that the $in query still exists and hasn't been optimized to an $eq.
- assert.eq(disabledPlan.queryPlanner.parsedQuery._id.$in, [kTestZip]);
+const disabledPlan = coll.explain().aggregate(pipeline);
+// Test that the $in query still exists and hasn't been optimized to an $eq.
+assert.eq(disabledPlan.queryPlanner.parsedQuery._id.$in, [kTestZip]);
- const disabledResult = coll.aggregate(pipeline).toArray();
+const disabledResult = coll.aggregate(pipeline).toArray();
- // Test that the result is the same with and without optimizations enabled (result is sorted).
- assert.eq(enabledResult, disabledResult);
+// Test that the result is the same with and without optimizations enabled (result is sorted).
+assert.eq(enabledResult, disabledResult);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
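The failpoint used above follows the usual configureFailPoint on/off pattern; the test itself never turns the failpoint back off because it simply stops mongod. A minimal sketch of the full pattern, assuming a testDb handle to the same node:

    // Skip match expression optimizations for subsequent queries.
    assert.commandWorked(testDb.adminCommand(
        {configureFailPoint: "disableMatchExpressionOptimization", mode: "alwaysOn"}));
    // ... run the workload that should bypass optimization, then restore it.
    assert.commandWorked(testDb.adminCommand(
        {configureFailPoint: "disableMatchExpressionOptimization", mode: "off"}));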
diff --git a/jstests/noPassthrough/maxTransactionLockRequestTimeoutMillis_serverParameter.js b/jstests/noPassthrough/maxTransactionLockRequestTimeoutMillis_serverParameter.js
index fe24cb47f4d..00428d7abe2 100644
--- a/jstests/noPassthrough/maxTransactionLockRequestTimeoutMillis_serverParameter.js
+++ b/jstests/noPassthrough/maxTransactionLockRequestTimeoutMillis_serverParameter.js
@@ -1,18 +1,18 @@
// Tests the maxTransactionLockRequestTimeoutMillis server parameter.
(function() {
- 'use strict';
+'use strict';
- load("jstests/noPassthrough/libs/server_parameter_helpers.js");
+load("jstests/noPassthrough/libs/server_parameter_helpers.js");
- // Valid parameter values are in the range (-infinity, infinity).
- testNumericServerParameter("maxTransactionLockRequestTimeoutMillis",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 5 /*defaultValue*/,
- 30 /*nonDefaultValidValue*/,
- false /*hasLowerBound*/,
- "unused" /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range (-infinity, infinity).
+testNumericServerParameter("maxTransactionLockRequestTimeoutMillis",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 5 /*defaultValue*/,
+ 30 /*nonDefaultValidValue*/,
+ false /*hasLowerBound*/,
+ "unused" /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
})();
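Since the test declares maxTransactionLockRequestTimeoutMillis to be both a startup and a runtime parameter, it can also be adjusted on a live node. A one-line sketch, assuming an admin-capable db handle:

    assert.commandWorked(db.adminCommand(
        {setParameter: 1, maxTransactionLockRequestTimeoutMillis: 30}));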
diff --git a/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js b/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js
index 3f833d3220b..9b513afafe2 100644
--- a/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js
+++ b/jstests/noPassthrough/max_acceptable_logical_clock_drift_secs_parameter.js
@@ -5,55 +5,52 @@
* @tags: [requires_sharding]
*/
(function() {
- "use strict";
-
- // maxAcceptableLogicalClockDriftSecs cannot be negative, zero, or a non-number.
- let conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: -1}});
- assert.eq(null, conn, "expected server to reject negative maxAcceptableLogicalClockDriftSecs");
-
- conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: 0}});
- assert.eq(null, conn, "expected server to reject zero maxAcceptableLogicalClockDriftSecs");
-
- conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: "value"}});
- assert.eq(
- null, conn, "expected server to reject non-numeric maxAcceptableLogicalClockDriftSecs");
-
- conn = MongoRunner.runMongod(
- {setParameter: {maxAcceptableLogicalClockDriftSecs: new Timestamp(50, 0)}});
- assert.eq(
- null, conn, "expected server to reject non-numeric maxAcceptableLogicalClockDriftSecs");
-
- // Any positive number is valid.
- conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: 1}});
- assert.neq(null, conn, "failed to start mongod with valid maxAcceptableLogicalClockDriftSecs");
- MongoRunner.stopMongod(conn);
-
- conn = MongoRunner.runMongod({
- setParameter: {maxAcceptableLogicalClockDriftSecs: 60 * 60 * 24 * 365 * 10}
- }); // 10 years.
- assert.neq(null, conn, "failed to start mongod with valid maxAcceptableLogicalClockDriftSecs");
- MongoRunner.stopMongod(conn);
-
- // Verify maxAcceptableLogicalClockDriftSecs works as expected in a sharded cluster.
- const maxDriftValue = 100;
- const st = new ShardingTest({
- shards: 1,
- shardOptions: {setParameter: {maxAcceptableLogicalClockDriftSecs: maxDriftValue}},
- mongosOptions: {setParameter: {maxAcceptableLogicalClockDriftSecs: maxDriftValue}}
- });
- let testDB = st.s.getDB("test");
-
- // Contact cluster to get initial cluster time.
- let res = assert.commandWorked(testDB.runCommand({isMaster: 1}));
- let lt = res.$clusterTime;
-
- // Try to advance cluster time by more than the max acceptable drift, which should fail the rate
- // limiter.
- let tooFarTime = Object.assign(
- {}, lt, {clusterTime: new Timestamp(lt.clusterTime.getTime() + (maxDriftValue * 2), 0)});
- assert.commandFailedWithCode(testDB.runCommand({isMaster: 1, $clusterTime: tooFarTime}),
- ErrorCodes.ClusterTimeFailsRateLimiter,
- "expected command to not pass the rate limiter");
-
- st.stop();
+"use strict";
+
+// maxAcceptableLogicalClockDriftSecs cannot be negative, zero, or a non-number.
+let conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: -1}});
+assert.eq(null, conn, "expected server to reject negative maxAcceptableLogicalClockDriftSecs");
+
+conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: 0}});
+assert.eq(null, conn, "expected server to reject zero maxAcceptableLogicalClockDriftSecs");
+
+conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: "value"}});
+assert.eq(null, conn, "expected server to reject non-numeric maxAcceptableLogicalClockDriftSecs");
+
+conn = MongoRunner.runMongod(
+ {setParameter: {maxAcceptableLogicalClockDriftSecs: new Timestamp(50, 0)}});
+assert.eq(null, conn, "expected server to reject non-numeric maxAcceptableLogicalClockDriftSecs");
+
+// Any positive number is valid.
+conn = MongoRunner.runMongod({setParameter: {maxAcceptableLogicalClockDriftSecs: 1}});
+assert.neq(null, conn, "failed to start mongod with valid maxAcceptableLogicalClockDriftSecs");
+MongoRunner.stopMongod(conn);
+
+conn = MongoRunner.runMongod(
+ {setParameter: {maxAcceptableLogicalClockDriftSecs: 60 * 60 * 24 * 365 * 10}}); // 10 years.
+assert.neq(null, conn, "failed to start mongod with valid maxAcceptableLogicalClockDriftSecs");
+MongoRunner.stopMongod(conn);
+
+// Verify maxAcceptableLogicalClockDriftSecs works as expected in a sharded cluster.
+const maxDriftValue = 100;
+const st = new ShardingTest({
+ shards: 1,
+ shardOptions: {setParameter: {maxAcceptableLogicalClockDriftSecs: maxDriftValue}},
+ mongosOptions: {setParameter: {maxAcceptableLogicalClockDriftSecs: maxDriftValue}}
+});
+let testDB = st.s.getDB("test");
+
+// Contact cluster to get initial cluster time.
+let res = assert.commandWorked(testDB.runCommand({isMaster: 1}));
+let lt = res.$clusterTime;
+
+// Try to advance cluster time by more than the max acceptable drift, which should fail the rate
+// limiter.
+let tooFarTime = Object.assign(
+ {}, lt, {clusterTime: new Timestamp(lt.clusterTime.getTime() + (maxDriftValue * 2), 0)});
+assert.commandFailedWithCode(testDB.runCommand({isMaster: 1, $clusterTime: tooFarTime}),
+ ErrorCodes.ClusterTimeFailsRateLimiter,
+ "expected command to not pass the rate limiter");
+
+st.stop();
})();
diff --git a/jstests/noPassthrough/max_bson_depth_parameter.js b/jstests/noPassthrough/max_bson_depth_parameter.js
index ec71f659e6d..bd39676bb98 100644
--- a/jstests/noPassthrough/max_bson_depth_parameter.js
+++ b/jstests/noPassthrough/max_bson_depth_parameter.js
@@ -3,32 +3,32 @@
* given an invalid depth.
*/
(function() {
- "use strict";
+"use strict";
- const kTestName = "max_bson_depth_parameter";
+const kTestName = "max_bson_depth_parameter";
- // Start mongod with a valid BSON depth, then test that it accepts and rejects command
- // appropriately based on the depth.
- let conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=5"});
- assert.neq(null, conn, "Failed to start mongod");
- let testDB = conn.getDB("test");
+// Start mongod with a valid BSON depth, then test that it accepts and rejects command
+// appropriately based on the depth.
+let conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=5"});
+assert.neq(null, conn, "Failed to start mongod");
+let testDB = conn.getDB("test");
- assert.commandWorked(testDB.runCommand({ping: 1}), "Failed to run a command on the server");
- assert.commandFailedWithCode(
- testDB.runCommand({find: "coll", filter: {x: {x: {x: {x: {x: {x: 1}}}}}}}),
- ErrorCodes.Overflow,
- "Expected server to reject command for exceeding the nesting depth limit");
+assert.commandWorked(testDB.runCommand({ping: 1}), "Failed to run a command on the server");
+assert.commandFailedWithCode(
+ testDB.runCommand({find: "coll", filter: {x: {x: {x: {x: {x: {x: 1}}}}}}}),
+ ErrorCodes.Overflow,
+ "Expected server to reject command for exceeding the nesting depth limit");
- // Confirm depth limits for $lookup.
- assert.writeOK(testDB.coll1.insert({_id: 1}));
- assert.writeOK(testDB.coll2.insert({_id: 1}));
+// Confirm depth limits for $lookup.
+assert.writeOK(testDB.coll1.insert({_id: 1}));
+assert.writeOK(testDB.coll2.insert({_id: 1}));
- assert.commandWorked(testDB.runCommand({
- aggregate: "coll1",
- pipeline: [{$lookup: {from: "coll2", as: "as", pipeline: []}}],
- cursor: {}
- }));
- assert.commandFailedWithCode(
+assert.commandWorked(testDB.runCommand({
+ aggregate: "coll1",
+ pipeline: [{$lookup: {from: "coll2", as: "as", pipeline: []}}],
+ cursor: {}
+}));
+assert.commandFailedWithCode(
testDB.runCommand({
aggregate: "coll1",
pipeline: [{
@@ -43,11 +43,11 @@
ErrorCodes.Overflow,
"Expected server to reject command for exceeding the nesting depth limit");
- // Restart mongod with a negative maximum BSON depth and test that it fails to start.
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=-4"});
- assert.eq(null, conn, "Expected mongod to fail at startup because depth was negative");
+// Restart mongod with a negative maximum BSON depth and test that it fails to start.
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=-4"});
+assert.eq(null, conn, "Expected mongod to fail at startup because depth was negative");
- conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=1"});
- assert.eq(null, conn, "Expected mongod to fail at startup because depth was too low");
+conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=1"});
+assert.eq(null, conn, "Expected mongod to fail at startup because depth was too low");
}());
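The depth-6 filter above is written out by hand; when probing a maxBSONDepth setting it can be handy to generate a filter of arbitrary depth instead. A minimal sketch, where nestedFilter is a hypothetical helper and not part of the test:

    function nestedFilter(depth) {
        let filter = {x: 1};
        for (let i = 1; i < depth; i++) {
            filter = {x: filter};  // wrap one more level
        }
        return filter;
    }
    // With maxBSONDepth=5, a depth-6 filter should be rejected with Overflow.
    assert.commandFailedWithCode(
        testDB.runCommand({find: "coll", filter: nestedFilter(6)}),
        ErrorCodes.Overflow);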
diff --git a/jstests/noPassthrough/max_conns_override.js b/jstests/noPassthrough/max_conns_override.js
index 5fa28804856..07d012d663f 100644
--- a/jstests/noPassthrough/max_conns_override.js
+++ b/jstests/noPassthrough/max_conns_override.js
@@ -1,45 +1,45 @@
(function() {
- 'use strict';
- const configuredMaxConns = 5;
- const configuredReadyAdminThreads = 3;
- let conn = MongoRunner.runMongod({
- config: "jstests/noPassthrough/libs/max_conns_override_config.yaml",
- // We check a specific field in this executor's serverStatus section
- serviceExecutor: "synchronous",
- });
+'use strict';
+const configuredMaxConns = 5;
+const configuredReadyAdminThreads = 3;
+let conn = MongoRunner.runMongod({
+ config: "jstests/noPassthrough/libs/max_conns_override_config.yaml",
+ // We check a specific field in this executor's serverStatus section
+ serviceExecutor: "synchronous",
+});
- // Use up all the maxConns with junk connections, all of these should succeed
- let maxConns = [];
- for (let i = 0; i < 5; i++) {
- maxConns.push(new Mongo(`127.0.0.1:${conn.port}`));
- let tmpDb = maxConns[maxConns.length - 1].getDB("admin");
- assert.commandWorked(tmpDb.runCommand({isMaster: 1}));
- }
+// Use up all the maxConns with junk connections, all of these should succeed
+let maxConns = [];
+for (let i = 0; i < 5; i++) {
+ maxConns.push(new Mongo(`127.0.0.1:${conn.port}`));
+ let tmpDb = maxConns[maxConns.length - 1].getDB("admin");
+ assert.commandWorked(tmpDb.runCommand({isMaster: 1}));
+}
- // Get serverStatus to check that we have the right number of threads in the right places
- let status = conn.getDB("admin").runCommand({serverStatus: 1});
- const connectionsStatus = status["connections"];
- const reservedExecutorStatus = connectionsStatus["adminConnections"];
- const normalExecutorStatus = status["network"]["serviceExecutorTaskStats"];
+// Get serverStatus to check that we have the right number of threads in the right places
+let status = conn.getDB("admin").runCommand({serverStatus: 1});
+const connectionsStatus = status["connections"];
+const reservedExecutorStatus = connectionsStatus["adminConnections"];
+const normalExecutorStatus = status["network"]["serviceExecutorTaskStats"];
- // Log these serverStatus sections so we can debug this easily
- print("connections status section: ", tojson(connectionsStatus));
- print("normal executor status section: ", tojson(normalExecutorStatus));
+// Log these serverStatus sections so we can debug this easily
+print("connections status section: ", tojson(connectionsStatus));
+print("normal executor status section: ", tojson(normalExecutorStatus));
- // The number of "available" connections should be less than zero, because we've used
- // all of maxConns. We're over the limit!
- assert.lt(connectionsStatus["available"], 0);
- // The number of "current" connections should be greater than maxConns
- assert.gt(connectionsStatus["current"], configuredMaxConns);
- // The number of ready threads should be the number of readyThreads we configured, since
- // every thread spawns a new thread on startup
- assert.eq(reservedExecutorStatus["readyThreads"] + reservedExecutorStatus["startingThreads"],
- configuredReadyAdminThreads);
- // The number of running admin threads should be greater than the readyThreads, because
- // one is being used right now
- assert.gt(reservedExecutorStatus["threadsRunning"], reservedExecutorStatus["readyThreads"]);
- // The normal serviceExecutor should only be running maxConns number of threads
- assert.eq(normalExecutorStatus["threadsRunning"], configuredMaxConns);
+// The number of "available" connections should be less than zero, because we've used
+// all of maxConns. We're over the limit!
+assert.lt(connectionsStatus["available"], 0);
+// The number of "current" connections should be greater than maxConns
+assert.gt(connectionsStatus["current"], configuredMaxConns);
+// The number of ready threads should be the number of readyThreads we configured, since
+// every thread spawns a new thread on startup
+assert.eq(reservedExecutorStatus["readyThreads"] + reservedExecutorStatus["startingThreads"],
+ configuredReadyAdminThreads);
+// The number of running admin threads should be greater than the readyThreads, because
+// one is being used right now
+assert.gt(reservedExecutorStatus["threadsRunning"], reservedExecutorStatus["readyThreads"]);
+// The normal serviceExecutor should only be running maxConns number of threads
+assert.eq(normalExecutorStatus["threadsRunning"], configuredMaxConns);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/max_time_ms_repl_targeting.js b/jstests/noPassthrough/max_time_ms_repl_targeting.js
index 792885ab0f3..de90a7a0d24 100644
--- a/jstests/noPassthrough/max_time_ms_repl_targeting.js
+++ b/jstests/noPassthrough/max_time_ms_repl_targeting.js
@@ -1,69 +1,69 @@
// SERVER-35132 Test that we still honor maxTimeMs during replica set targeting.
// @tags: [requires_replication]
(function() {
- 'use strict';
- var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 2}});
- var kDbName = 'test';
- var ns = 'test.foo';
- var mongos = st.s0;
- var testColl = mongos.getCollection(ns);
+'use strict';
+var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 2}});
+var kDbName = 'test';
+var ns = 'test.foo';
+var mongos = st.s0;
+var testColl = mongos.getCollection(ns);
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- // Since this test is timing sensitive, retry on failures since they could be transient.
- // If broken, this would *always* fail so if it ever passes this build is fine (or time went
- // backwards).
- const tryFiveTimes = function(name, f) {
- jsTestLog(`Starting test ${name}`);
+// Since this test is timing sensitive, retry on failures since they could be transient.
+// If broken, this would *always* fail so if it ever passes this build is fine (or time went
+// backwards).
+const tryFiveTimes = function(name, f) {
+ jsTestLog(`Starting test ${name}`);
- for (var trial = 1; trial <= 5; trial++) {
- try {
- f();
- } catch (e) {
- if (trial < 5) {
- jsTestLog(`Ignoring error during trial ${trial} of test ${name}`);
- continue;
- }
-
- jsTestLog(`Failed 5 times in test ${name}. There is probably a bug here.`);
- throw e;
+ for (var trial = 1; trial <= 5; trial++) {
+ try {
+ f();
+ } catch (e) {
+ if (trial < 5) {
+ jsTestLog(`Ignoring error during trial ${trial} of test ${name}`);
+ continue;
}
+
+ jsTestLog(`Failed 5 times in test ${name}. There is probably a bug here.`);
+ throw e;
}
- };
+ }
+};
- const runTest = function() {
- // Sanity Check
- assert.eq(testColl.find({_id: 1}).next(), {_id: 1});
+const runTest = function() {
+ // Sanity Check
+ assert.eq(testColl.find({_id: 1}).next(), {_id: 1});
- // MaxTimeMS with satisfiable readPref
- assert.eq(testColl.find({_id: 1}).readPref("secondary").maxTimeMS(1000).next(), {_id: 1});
+ // MaxTimeMS with satisfiable readPref
+ assert.eq(testColl.find({_id: 1}).readPref("secondary").maxTimeMS(1000).next(), {_id: 1});
- let ex = null;
+ let ex = null;
- // MaxTimeMS with unsatisfiable readPref
- const time = Date.timeFunc(() => {
- ex = assert.throws(() => {
- testColl.find({_id: 1})
- .readPref("secondary", [{tag: "noSuchTag"}])
- .maxTimeMS(1000)
- .next();
- });
+ // MaxTimeMS with unsatisfiable readPref
+ const time = Date.timeFunc(() => {
+ ex = assert.throws(() => {
+ testColl.find({_id: 1})
+ .readPref("secondary", [{tag: "noSuchTag"}])
+ .maxTimeMS(1000)
+ .next();
});
+ });
- assert.gte(time, 1000); // Make sure we at least waited 1 second.
- assert.lt(time, 15 * 1000); // We used to wait 20 seconds before timing out.
+ assert.gte(time, 1000); // Make sure we at least waited 1 second.
+ assert.lt(time, 15 * 1000); // We used to wait 20 seconds before timing out.
- assert.eq(ex.code, ErrorCodes.MaxTimeMSExpired);
- };
+ assert.eq(ex.code, ErrorCodes.MaxTimeMSExpired);
+};
- testColl.insert({_id: 1}, {writeConcern: {w: 2}});
- tryFiveTimes("totally unsharded", runTest);
+testColl.insert({_id: 1}, {writeConcern: {w: 2}});
+tryFiveTimes("totally unsharded", runTest);
- assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- tryFiveTimes("sharded db", runTest);
+assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+tryFiveTimes("sharded db", runTest);
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 1}}));
- tryFiveTimes("sharded collection", runTest);
+assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 1}}));
+tryFiveTimes("sharded collection", runTest);
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/member_id_too_large.js b/jstests/noPassthrough/member_id_too_large.js
index 9e514d49a8e..c265df315ca 100644
--- a/jstests/noPassthrough/member_id_too_large.js
+++ b/jstests/noPassthrough/member_id_too_large.js
@@ -2,36 +2,38 @@
// members in the set, followed by waiting for writeConcern with "w" values equal to size of set.
// @tags: [requires_replication]
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- jsTestLog("replSetInitiate with member _id greater than number of members");
+jsTestLog("replSetInitiate with member _id greater than number of members");
- let conf = rst.getReplSetConfig();
- conf.members[1]._id = 2;
+let conf = rst.getReplSetConfig();
+conf.members[1]._id = 2;
- rst.initiate(conf);
+rst.initiate(conf);
- const dbName = "test";
- const collName = "test";
- const primary = rst.getPrimary();
- const testColl = primary.getDB(dbName).getCollection(collName);
- const doc = {a: 1};
+const dbName = "test";
+const collName = "test";
+const primary = rst.getPrimary();
+const testColl = primary.getDB(dbName).getCollection(collName);
+const doc = {
+ a: 1
+};
- assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 2}}));
+assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 2}}));
- jsTestLog("replSetReconfig with member _id greater than number of members");
+jsTestLog("replSetReconfig with member _id greater than number of members");
- let secondary2 = MongoRunner.runMongod({replSet: rst.name});
- conf = rst.getReplSetConfigFromNode();
- conf.version++;
- conf.members.push({_id: 5, host: secondary2.host});
- assert.commandWorked(primary.getDB("admin").runCommand({replSetReconfig: conf}));
- assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 2}}));
- assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 3}}));
+let secondary2 = MongoRunner.runMongod({replSet: rst.name});
+conf = rst.getReplSetConfigFromNode();
+conf.version++;
+conf.members.push({_id: 5, host: secondary2.host});
+assert.commandWorked(primary.getDB("admin").runCommand({replSetReconfig: conf}));
+assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 2}}));
+assert.commandWorked(testColl.insert(doc, {writeConcern: {w: 3}}));
- MongoRunner.stopMongod(secondary2);
- rst.stopSet();
+MongoRunner.stopMongod(secondary2);
+rst.stopSet();
})();
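After the reconfig above, the member _ids are intentionally non-contiguous. A small sketch for inspecting them on the primary, assuming the replSetGetConfig command (names as in the test):

    const cfg = assert.commandWorked(
        primary.getDB("admin").runCommand({replSetGetConfig: 1})).config;
    printjson(cfg.members.map(m => m._id));  // e.g. [0, 2, 5]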
diff --git a/jstests/noPassthrough/merge_max_time_ms.js b/jstests/noPassthrough/merge_max_time_ms.js
index e7bebd8a2cb..fb55b13604b 100644
--- a/jstests/noPassthrough/merge_max_time_ms.js
+++ b/jstests/noPassthrough/merge_max_time_ms.js
@@ -3,262 +3,251 @@
* @tags: [requires_sharding, requires_replication]
*/
(function() {
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode().
- load("jstests/libs/fixture_helpers.js"); // For isMongos().
- load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
-
- const kDBName = "test";
- const kSourceCollName = "merge_max_time_ms_source";
- const kDestCollName = "merge_max_time_ms_dest";
- const nDocs = 10;
-
- /**
- * Helper for populating the collection.
- */
- function insertDocs(coll) {
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({_id: i}));
- }
- }
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode().
+load("jstests/libs/fixture_helpers.js"); // For isMongos().
+load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
- /**
- * Wait until the server sets its CurOp "msg" to the failpoint name, indicating that it's
- * hanging.
- */
- function waitUntilServerHangsOnFailPoint(conn, fpName) {
- // Be sure that the server is hanging on the failpoint.
- assert.soon(function() {
- const filter = {"msg": fpName};
- const ops = conn.getDB("admin")
- .aggregate([{$currentOp: {allUsers: true}}, {$match: filter}])
- .toArray();
- return ops.length == 1;
- });
- }
+const kDBName = "test";
+const kSourceCollName = "merge_max_time_ms_source";
+const kDestCollName = "merge_max_time_ms_dest";
+const nDocs = 10;
- /**
- * Given a $merge parameters mongod connection, run a $out aggregation against 'conn' which
- * hangs on the given failpoint and ensure that the $out maxTimeMS expires.
- */
- function forceAggregationToHangAndCheckMaxTimeMsExpires(
- whenMatched, whenNotMatched, conn, failPointName) {
- // Use a short maxTimeMS so that the test completes in a reasonable amount of time. We will
- // use the 'maxTimeNeverTimeOut' failpoint to ensure that the operation does not
- // prematurely time out.
- const maxTimeMS = 1000 * 2;
-
- // Enable a failPoint so that the write will hang.
- let failpointCommand = {
- configureFailPoint: failPointName,
- mode: "alwaysOn",
- data: {nss: kDBName + "." + kDestCollName}
- };
-
- assert.commandWorked(conn.getDB("admin").runCommand(failpointCommand));
-
- // Make sure we don't run out of time before the failpoint is hit.
- assert.commandWorked(conn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
-
- // Build the parallel shell function.
- let shellStr = `const sourceColl = db['${kSourceCollName}'];`;
- shellStr += `const destColl = db['${kDestCollName}'];`;
- shellStr += `const maxTimeMS = ${maxTimeMS};`;
- shellStr += `const whenMatched = ${tojson(whenMatched)};`;
- shellStr += `const whenNotMatched = '${whenNotMatched}';`;
- const runAggregate = function() {
- const pipeline = [{
- $merge: {
- into: destColl.getName(),
- whenMatched: whenMatched,
- whenNotMatched: whenNotMatched
- }
- }];
- const err = assert.throws(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
- assert.eq(err.code, ErrorCodes.MaxTimeMSExpired, "expected aggregation to fail");
- };
- shellStr += `(${runAggregate.toString()})();`;
- const awaitShell = startParallelShell(shellStr, conn.port);
-
- waitUntilServerHangsOnFailPoint(conn, failPointName);
-
- assert.commandWorked(conn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
-
- // The aggregation running in the parallel shell will hang on the failpoint, burning
- // its time. Wait until the maxTimeMS has definitely expired.
- sleep(maxTimeMS + 2000);
-
- // Now drop the failpoint, allowing the aggregation to proceed. It should hit an
- // interrupt check and terminate immediately.
- assert.commandWorked(
- conn.getDB("admin").runCommand({configureFailPoint: failPointName, mode: "off"}));
-
- // Wait for the parallel shell to finish.
- assert.eq(awaitShell(), 0);
+/**
+ * Helper for populating the collection.
+ */
+function insertDocs(coll) {
+ for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({_id: i}));
}
+}
- function runUnshardedTest(whenMatched, whenNotMatched, conn) {
- jsTestLog("Running unsharded test in whenMatched: " + whenMatched + " whenNotMatched: " +
- whenNotMatched);
- // The target collection will always be empty so we do not test the setting that will cause
- // only failure.
- if (whenNotMatched == "fail") {
- return;
- }
-
- const sourceColl = conn.getDB(kDBName)[kSourceCollName];
- const destColl = conn.getDB(kDBName)[kDestCollName];
- assert.commandWorked(destColl.remove({}));
-
- // Be sure we're able to read from a cursor with a maxTimeMS set on it.
- (function() {
- // Use a long maxTimeMS, since we expect the operation to finish.
- const maxTimeMS = 1000 * 600;
- const pipeline = [{
- $merge: {
- into: destColl.getName(),
- whenMatched: whenMatched,
- whenNotMatched: whenNotMatched
- }
- }];
- assert.doesNotThrow(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
- })();
-
- assert.commandWorked(destColl.remove({}));
-
- // Force the aggregation to hang while the batch is being written. The failpoint changes
- // depending on the mode. If 'whenMatched' is set to "fail" then the implementation will end
- // up issuing insert commands instead of updates.
- const kFailPointName =
- whenMatched == "fail" ? "hangDuringBatchInsert" : "hangDuringBatchUpdate";
- forceAggregationToHangAndCheckMaxTimeMsExpires(
- whenMatched, whenNotMatched, conn, kFailPointName);
-
- assert.commandWorked(destColl.remove({}));
-
- // Force the aggregation to hang while the batch is being built.
- forceAggregationToHangAndCheckMaxTimeMsExpires(
- whenMatched, whenNotMatched, conn, "hangWhileBuildingDocumentSourceMergeBatch");
+/**
+ * Wait until the server sets its CurOp "msg" to the failpoint name, indicating that it's
+ * hanging.
+ */
+function waitUntilServerHangsOnFailPoint(conn, fpName) {
+ // Be sure that the server is hanging on the failpoint.
+ assert.soon(function() {
+ const filter = {"msg": fpName};
+ const ops = conn.getDB("admin")
+ .aggregate([{$currentOp: {allUsers: true}}, {$match: filter}])
+ .toArray();
+ return ops.length == 1;
+ });
+}
+
+/**
+ * Given a $merge parameters mongod connection, run a $out aggregation against 'conn' which
+ * hangs on the given failpoint and ensure that the $out maxTimeMS expires.
+ */
+function forceAggregationToHangAndCheckMaxTimeMsExpires(
+ whenMatched, whenNotMatched, conn, failPointName) {
+ // Use a short maxTimeMS so that the test completes in a reasonable amount of time. We will
+ // use the 'maxTimeNeverTimeOut' failpoint to ensure that the operation does not
+ // prematurely time out.
+ const maxTimeMS = 1000 * 2;
+
+ // Enable a failPoint so that the write will hang.
+ let failpointCommand = {
+ configureFailPoint: failPointName,
+ mode: "alwaysOn",
+ data: {nss: kDBName + "." + kDestCollName}
+ };
+
+ assert.commandWorked(conn.getDB("admin").runCommand(failpointCommand));
+
+ // Make sure we don't run out of time before the failpoint is hit.
+ assert.commandWorked(conn.getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
+
+ // Build the parallel shell function.
+ let shellStr = `const sourceColl = db['${kSourceCollName}'];`;
+ shellStr += `const destColl = db['${kDestCollName}'];`;
+ shellStr += `const maxTimeMS = ${maxTimeMS};`;
+ shellStr += `const whenMatched = ${tojson(whenMatched)};`;
+ shellStr += `const whenNotMatched = '${whenNotMatched}';`;
+ const runAggregate = function() {
+ const pipeline = [{
+ $merge:
+ {into: destColl.getName(), whenMatched: whenMatched, whenNotMatched: whenNotMatched}
+ }];
+ const err = assert.throws(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
+ assert.eq(err.code, ErrorCodes.MaxTimeMSExpired, "expected aggregation to fail");
+ };
+ shellStr += `(${runAggregate.toString()})();`;
+ const awaitShell = startParallelShell(shellStr, conn.port);
+
+ waitUntilServerHangsOnFailPoint(conn, failPointName);
+
+ assert.commandWorked(
+ conn.getDB("admin").runCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
+
+ // The aggregation running in the parallel shell will hang on the failpoint, burning
+ // its time. Wait until the maxTimeMS has definitely expired.
+ sleep(maxTimeMS + 2000);
+
+ // Now drop the failpoint, allowing the aggregation to proceed. It should hit an
+ // interrupt check and terminate immediately.
+ assert.commandWorked(
+ conn.getDB("admin").runCommand({configureFailPoint: failPointName, mode: "off"}));
+
+ // Wait for the parallel shell to finish.
+ assert.eq(awaitShell(), 0);
+}
+
+function runUnshardedTest(whenMatched, whenNotMatched, conn) {
+ jsTestLog("Running unsharded test in whenMatched: " + whenMatched +
+ " whenNotMatched: " + whenNotMatched);
+ // The target collection will always be empty so we do not test the setting that will cause
+ // only failure.
+ if (whenNotMatched == "fail") {
+ return;
}
- // Run on a standalone.
+ const sourceColl = conn.getDB(kDBName)[kSourceCollName];
+ const destColl = conn.getDB(kDBName)[kDestCollName];
+ assert.commandWorked(destColl.remove({}));
+
+ // Be sure we're able to read from a cursor with a maxTimeMS set on it.
(function() {
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, 'mongod was unable to start up');
- insertDocs(conn.getDB(kDBName)[kSourceCollName]);
- withEachMergeMode(
- (mode) => runUnshardedTest(mode.whenMatchedMode, mode.whenNotMatchedMode, conn));
- MongoRunner.stopMongod(conn);
+ // Use a long maxTimeMS, since we expect the operation to finish.
+ const maxTimeMS = 1000 * 600;
+ const pipeline = [{
+ $merge:
+ {into: destColl.getName(), whenMatched: whenMatched, whenNotMatched: whenNotMatched}
+ }];
+ assert.doesNotThrow(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
})();
- // Runs a $merge against 'mongosConn' and verifies that the maxTimeMS value is included in the
- // command sent to mongod. Since the actual timeout can unreliably happen in mongos before even
- // reaching the shard, we instead set a very large timeout and verify that the command sent to
- // mongod includes the maxTimeMS.
- function runShardedTest(whenMatched, whenNotMatched, mongosConn, mongodConn, comment) {
- jsTestLog("Running sharded test in whenMatched: " + whenMatched + " whenNotMatched: " +
- whenNotMatched);
- // The target collection will always be empty so we do not test the setting that will cause
- // only failure.
- if (whenNotMatched == "fail") {
- return;
- }
-
- // Set a large timeout since we expect the command to finish.
- const maxTimeMS = 1000 * 20;
-
- const sourceColl = mongosConn.getDB(kDBName)[kSourceCollName];
- const destColl = mongosConn.getDB(kDBName)[kDestCollName];
- assert.commandWorked(destColl.remove({}));
-
- // Make sure we don't timeout in mongos before even reaching the shards.
- assert.commandWorked(mongosConn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
-
- const cursor = sourceColl.aggregate([{
- $merge: {
- into: destColl.getName(),
- whenMatched: whenMatched,
- whenNotMatched: whenNotMatched
- }
- }],
- {maxTimeMS: maxTimeMS, comment: comment});
- assert(!cursor.hasNext());
-
- // Filter the profiler entries on the existence of $merge, since aggregations through mongos
- // will include an extra aggregation with an empty pipeline to establish cursors on the
- // shards.
- assert.soon(function() {
- return mongodConn.getDB(kDBName)
- .system.profile
- .find({
- "command.aggregate": kSourceCollName,
- "command.pipeline.$merge": {"$exists": true},
- "command.comment": comment,
- "command.maxTimeMS": maxTimeMS,
- })
- .itcount() == 1;
- });
-
- assert.commandWorked(mongosConn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
+ assert.commandWorked(destColl.remove({}));
+
+ // Force the aggregation to hang while the batch is being written. The failpoint changes
+ // depending on the mode. If 'whenMatched' is set to "fail" then the implementation will end
+ // up issuing insert commands instead of updates.
+ const kFailPointName =
+ whenMatched == "fail" ? "hangDuringBatchInsert" : "hangDuringBatchUpdate";
+ forceAggregationToHangAndCheckMaxTimeMsExpires(
+ whenMatched, whenNotMatched, conn, kFailPointName);
+
+ assert.commandWorked(destColl.remove({}));
+
+ // Force the aggregation to hang while the batch is being built.
+ forceAggregationToHangAndCheckMaxTimeMsExpires(
+ whenMatched, whenNotMatched, conn, "hangWhileBuildingDocumentSourceMergeBatch");
+}
+
+// Run on a standalone.
+(function() {
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, 'mongod was unable to start up');
+insertDocs(conn.getDB(kDBName)[kSourceCollName]);
+withEachMergeMode((mode) => runUnshardedTest(mode.whenMatchedMode, mode.whenNotMatchedMode, conn));
+MongoRunner.stopMongod(conn);
+})();
+
+// Runs a $merge against 'mongosConn' and verifies that the maxTimeMS value is included in the
+// command sent to mongod. Since the actual timeout can unreliably happen in mongos before even
+// reaching the shard, we instead set a very large timeout and verify that the command sent to
+// mongod includes the maxTimeMS.
+function runShardedTest(whenMatched, whenNotMatched, mongosConn, mongodConn, comment) {
+ jsTestLog("Running sharded test in whenMatched: " + whenMatched +
+ " whenNotMatched: " + whenNotMatched);
+ // The target collection will always be empty so we do not test the setting that will cause
+ // only failure.
+ if (whenNotMatched == "fail") {
+ return;
}
- // Run on a sharded cluster.
- (function() {
- const st = new ShardingTest({shards: 2});
-
- // Ensure shard 0 is the primary shard. This is so that the $merge stage is guaranteed to
- // run on it.
- assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: kDBName}));
- st.ensurePrimaryShard(kDBName, st.shard0.name);
-
- // Set up the source collection to be sharded in a way such that each node will have some
- // documents for the remainder of the test.
- // shard 0: [MinKey, 5]
- // shard 1: [5, MaxKey]
- st.shardColl(kSourceCollName,
- {_id: 1}, // key
- {_id: 5}, // split
- {_id: 6}, // move
- kDBName);
- insertDocs(st.s.getDB(kDBName)[kSourceCollName]);
-
- // Start the profiler on each shard so that we can examine the $out's maxTimeMS.
- assert.commandWorked(st.shard0.getDB(kDBName).setProfilingLevel(2));
- assert.commandWorked(st.shard1.getDB(kDBName).setProfilingLevel(2));
-
- // // Run the test with 'destColl' unsharded.
- withEachMergeMode((mode) => runShardedTest(mode.whenMatchedMode,
- mode.whenNotMatchedMode,
- st.s,
- st.shard0,
- mode + "_unshardedDest"));
-
- // Run the test with 'destColl' sharded. This means that writes will be sent to both
- // shards, and if either one hangs, the MaxTimeMS will expire.
- // Shard the destination collection.
- st.shardColl(kDestCollName,
- {_id: 1}, // key
- {_id: 5}, // split
- {_id: 6}, // move
- kDBName);
-
- jsTestLog("Running test forcing shard " + st.shard0.name + " to hang");
- withEachMergeMode((mode) => runShardedTest(mode.whenMatchedMode,
- mode.whenNotMatchedMode,
- st.s,
- st.shard0,
- mode + "_shardedDest_" + st.shard0.name));
-
- jsTestLog("Running test forcing shard " + st.shard1.name + " to hang");
- withEachMergeMode((mode) => runShardedTest(mode.whenMatchedMode,
- mode.whenNotMatchedMode,
- st.s,
- st.shard1,
- mode + "_shardedDest_" + st.shard1.name));
-
- st.stop();
- })();
+ // Set a large timeout since we expect the command to finish.
+ const maxTimeMS = 1000 * 20;
+
+ const sourceColl = mongosConn.getDB(kDBName)[kSourceCollName];
+ const destColl = mongosConn.getDB(kDBName)[kDestCollName];
+ assert.commandWorked(destColl.remove({}));
+
+ // Make sure we don't timeout in mongos before even reaching the shards.
+ assert.commandWorked(mongosConn.getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
+
+ const cursor = sourceColl.aggregate(
+ [{
+ $merge:
+ {into: destColl.getName(), whenMatched: whenMatched, whenNotMatched: whenNotMatched}
+ }],
+ {maxTimeMS: maxTimeMS, comment: comment});
+ assert(!cursor.hasNext());
+
+ // Filter the profiler entries on the existence of $merge, since aggregations through mongos
+ // will include an extra aggregation with an empty pipeline to establish cursors on the
+ // shards.
+ assert.soon(function() {
+ return mongodConn.getDB(kDBName)
+ .system.profile
+ .find({
+ "command.aggregate": kSourceCollName,
+ "command.pipeline.$merge": {"$exists": true},
+ "command.comment": comment,
+ "command.maxTimeMS": maxTimeMS,
+ })
+ .itcount() == 1;
+ });
+
+ assert.commandWorked(mongosConn.getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
+}
+
+// Run on a sharded cluster.
+(function() {
+const st = new ShardingTest({shards: 2});
+
+// Ensure shard 0 is the primary shard. This is so that the $merge stage is guaranteed to
+// run on it.
+assert.commandWorked(st.s.getDB("admin").runCommand({enableSharding: kDBName}));
+st.ensurePrimaryShard(kDBName, st.shard0.name);
+
+// Set up the source collection to be sharded in a way such that each node will have some
+// documents for the remainder of the test.
+// shard 0: [MinKey, 5]
+// shard 1: [5, MaxKey]
+st.shardColl(kSourceCollName,
+ {_id: 1}, // key
+ {_id: 5}, // split
+ {_id: 6}, // move
+ kDBName);
+insertDocs(st.s.getDB(kDBName)[kSourceCollName]);
+
+// Start the profiler on each shard so that we can examine the $out's maxTimeMS.
+assert.commandWorked(st.shard0.getDB(kDBName).setProfilingLevel(2));
+assert.commandWorked(st.shard1.getDB(kDBName).setProfilingLevel(2));
+
+// // Run the test with 'destColl' unsharded.
+withEachMergeMode(
+ (mode) => runShardedTest(
+ mode.whenMatchedMode, mode.whenNotMatchedMode, st.s, st.shard0, mode + "_unshardedDest"));
+
+// Run the test with 'destColl' sharded. This means that writes will be sent to both
+// shards, and if either one hangs, the MaxTimeMS will expire.
+// Shard the destination collection.
+st.shardColl(kDestCollName,
+ {_id: 1}, // key
+ {_id: 5}, // split
+ {_id: 6}, // move
+ kDBName);
+
+jsTestLog("Running test forcing shard " + st.shard0.name + " to hang");
+withEachMergeMode((mode) => runShardedTest(mode.whenMatchedMode,
+ mode.whenNotMatchedMode,
+ st.s,
+ st.shard0,
+ mode + "_shardedDest_" + st.shard0.name));
+
+jsTestLog("Running test forcing shard " + st.shard1.name + " to hang");
+withEachMergeMode((mode) => runShardedTest(mode.whenMatchedMode,
+ mode.whenNotMatchedMode,
+ st.s,
+ st.shard1,
+ mode + "_shardedDest_" + st.shard1.name));
+
+st.stop();
+})();
})();
diff --git a/jstests/noPassthrough/minvalid2.js b/jstests/noPassthrough/minvalid2.js
index b9ce32fe153..b5f29a8a97c 100644
--- a/jstests/noPassthrough/minvalid2.js
+++ b/jstests/noPassthrough/minvalid2.js
@@ -65,8 +65,8 @@ printjson(lastOp);
// crash.
local.replset.minvalid.update({},
{
- ts: new Timestamp(lastOp.ts.t, lastOp.ts.i + 1),
- t: NumberLong(-1),
+ ts: new Timestamp(lastOp.ts.t, lastOp.ts.i + 1),
+ t: NumberLong(-1),
},
{upsert: true});
printjson(local.replset.minvalid.findOne());
diff --git a/jstests/noPassthrough/mongoebench_test.js b/jstests/noPassthrough/mongoebench_test.js
index 1ae0e6ba29f..cb531963e2b 100644
--- a/jstests/noPassthrough/mongoebench_test.js
+++ b/jstests/noPassthrough/mongoebench_test.js
@@ -2,50 +2,50 @@
* Tests for the mongoebench executable.
*/
(function() {
- "use strict";
-
- load("jstests/libs/mongoebench.js"); // for runMongoeBench
-
- if (jsTest.options().storageEngine !== "mobile") {
- print("Skipping test because storage engine isn't mobile");
- return;
- }
-
- const dbpath = MongoRunner.dataPath + "mongoebench_test";
- resetDbpath(dbpath);
-
- // Test that the operations in the "pre" section of the configuration are run exactly once.
- runMongoeBench( // Force clang-format to break this line.
- {
- pre: [{
- op: "insert",
- ns: "test.mongoebench_test",
- doc: {pre: {"#SEQ_INT": {seq_id: 0, start: 0, step: 1, unique: true}}}
- }],
- ops: [{
- op: "update",
- ns: "test.mongoebench_test",
- update: {$inc: {ops: 1}},
- multi: true,
- }]
- },
- {dbpath});
-
- const output = cat(dbpath + "/perf.json");
- const stats = assert.doesNotThrow(
- JSON.parse, [output], "failed to parse output file as strict JSON: " + output);
- assert.eq({$numberLong: "0"},
- stats.errCount,
- () => "stats file reports errors but exit code was zero: " + tojson(stats));
- assert(stats.hasOwnProperty("totalOps/s"),
- () => "stats file doesn't report ops per second: " + tojson(stats));
-
- const conn = MongoRunner.runMongod({dbpath, noCleanData: true});
- assert.neq(null, conn, "failed to start mongod after running mongoebench");
-
- const db = conn.getDB("test");
- const count = db.mongoebench_test.find().itcount();
- assert.eq(1, count, "ops in 'pre' section ran more than once or didn't run at all");
-
- MongoRunner.stopMongod(conn);
+"use strict";
+
+load("jstests/libs/mongoebench.js"); // for runMongoeBench
+
+if (jsTest.options().storageEngine !== "mobile") {
+ print("Skipping test because storage engine isn't mobile");
+ return;
+}
+
+const dbpath = MongoRunner.dataPath + "mongoebench_test";
+resetDbpath(dbpath);
+
+// Test that the operations in the "pre" section of the configuration are run exactly once.
+runMongoeBench( // Force clang-format to break this line.
+ {
+ pre: [{
+ op: "insert",
+ ns: "test.mongoebench_test",
+ doc: {pre: {"#SEQ_INT": {seq_id: 0, start: 0, step: 1, unique: true}}}
+ }],
+ ops: [{
+ op: "update",
+ ns: "test.mongoebench_test",
+ update: {$inc: {ops: 1}},
+ multi: true,
+ }]
+ },
+ {dbpath});
+
+const output = cat(dbpath + "/perf.json");
+const stats = assert.doesNotThrow(
+ JSON.parse, [output], "failed to parse output file as strict JSON: " + output);
+assert.eq({$numberLong: "0"},
+ stats.errCount,
+ () => "stats file reports errors but exit code was zero: " + tojson(stats));
+assert(stats.hasOwnProperty("totalOps/s"),
+ () => "stats file doesn't report ops per second: " + tojson(stats));
+
+const conn = MongoRunner.runMongod({dbpath, noCleanData: true});
+assert.neq(null, conn, "failed to start mongod after running mongoebench");
+
+const db = conn.getDB("test");
+const count = db.mongoebench_test.find().itcount();
+assert.eq(1, count, "ops in 'pre' section ran more than once or didn't run at all");
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js b/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js
index e2418738995..843d6e2631a 100644
--- a/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js
+++ b/jstests/noPassthrough/mongos_exhausts_stale_config_retries.js
@@ -3,61 +3,60 @@
//
// @tags: [requires_sharding]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- const st = new ShardingTest({shards: 2, config: 1});
- const testDB = st.s.getDB(dbName);
-
- // Only testing the command read and write modes.
- assert(testDB.getMongo().readMode() === "commands");
- assert(testDB.getMongo().writeMode() === "commands");
-
- // Shard a collection with the only chunk on shard0.
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
-
- const sourcePrimary = st.rs0.getPrimary();
- const recipientPrimary = st.rs1.getPrimary();
-
- // Disable the best-effort recipient metadata refresh after migrations and move the chunk
- // between shards so the recipient shard, shard1, is stale.
- assert.commandWorked(sourcePrimary.adminCommand(
- {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
-
- // Disable metadata refreshes on the recipient shard so it will indefinitely return StaleConfig.
- assert.commandWorked(recipientPrimary.adminCommand(
- {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "alwaysOn"}));
-
- // Test various read and write commands that are sent with shard versions and thus can return
- // StaleConfig. Batch writes, i.e. insert/update/delete, return batch responses with ok:1 and
- // NoProgressMade write errors when retries are exhausted, so they are excluded.
- const kCommands = [
- {aggregate: collName, pipeline: [], cursor: {}},
- {count: collName},
- {distinct: collName, query: {}, key: "_id"},
- {find: collName},
- {findAndModify: collName, query: {_id: 0}, update: {$set: {x: 1}}},
- ];
-
- kCommands.forEach((cmd) => {
- // The recipient shard should return StaleConfig until mongos exhausts its retries and
- // returns the final StaleConfig to the client.
- assert.commandFailedWithCode(testDB.runCommand(cmd),
- ErrorCodes.StaleConfig,
- "expected to fail with StaleConfig, cmd: " + tojson(cmd));
- });
-
- assert.commandWorked(sourcePrimary.adminCommand(
- {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "off"}));
- assert.commandWorked(recipientPrimary.adminCommand(
- {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "off"}));
-
- st.stop();
+"use strict";
+
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
+
+const st = new ShardingTest({shards: 2, config: 1});
+const testDB = st.s.getDB(dbName);
+
+// Only testing the command read and write modes.
+assert(testDB.getMongo().readMode() === "commands");
+assert(testDB.getMongo().writeMode() === "commands");
+
+// Shard a collection with the only chunk on shard0.
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+
+const sourcePrimary = st.rs0.getPrimary();
+const recipientPrimary = st.rs1.getPrimary();
+
+// Disable the best-effort recipient metadata refresh after migrations and move the chunk
+// between shards so the recipient shard, shard1, is stale.
+assert.commandWorked(sourcePrimary.adminCommand(
+ {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "alwaysOn"}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));
+
+// Disable metadata refreshes on the recipient shard so it will indefinitely return StaleConfig.
+assert.commandWorked(recipientPrimary.adminCommand(
+ {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "alwaysOn"}));
+
+// Test various read and write commands that are sent with shard versions and thus can return
+// StaleConfig. Batch writes, i.e. insert/update/delete, return batch responses with ok:1 and
+// NoProgressMade write errors when retries are exhausted, so they are excluded.
+const kCommands = [
+ {aggregate: collName, pipeline: [], cursor: {}},
+ {count: collName},
+ {distinct: collName, query: {}, key: "_id"},
+ {find: collName},
+ {findAndModify: collName, query: {_id: 0}, update: {$set: {x: 1}}},
+];
+
+kCommands.forEach((cmd) => {
+ // The recipient shard should return StaleConfig until mongos exhausts its retries and
+ // returns the final StaleConfig to the client.
+ assert.commandFailedWithCode(testDB.runCommand(cmd),
+ ErrorCodes.StaleConfig,
+ "expected to fail with StaleConfig, cmd: " + tojson(cmd));
+});
+
+assert.commandWorked(sourcePrimary.adminCommand(
+ {configureFailPoint: "doNotRefreshRecipientAfterCommit", mode: "off"}));
+assert.commandWorked(recipientPrimary.adminCommand(
+ {configureFailPoint: "skipShardFilteringMetadataRefresh", mode: "off"}));
+
+st.stop();
})();
diff --git a/jstests/noPassthrough/nested_tojson.js b/jstests/noPassthrough/nested_tojson.js
index c5137cd3cef..886e9cf3784 100644
--- a/jstests/noPassthrough/nested_tojson.js
+++ b/jstests/noPassthrough/nested_tojson.js
@@ -1,31 +1,30 @@
(function() {
- "use strict";
+"use strict";
- const tooMuchRecursion = (1 << 16);
+const tooMuchRecursion = (1 << 16);
- const nestobj = (depth) => {
- let doc = {};
- let cur = doc;
- for (let i = 0; i < depth; i++) {
- cur[i] = {};
- cur = cur[i];
- }
- cur['a'] = 'foo';
- return doc;
- };
+const nestobj = (depth) => {
+ let doc = {};
+ let cur = doc;
+ for (let i = 0; i < depth; i++) {
+ cur[i] = {};
+ cur = cur[i];
+ }
+ cur['a'] = 'foo';
+ return doc;
+};
- const nestarr = (depth) => {
- let doc = [0];
- let cur = doc;
- for (let i = 0; i < depth; i++) {
- cur[0] = [0];
- cur = cur[0];
- }
- cur[0] = 'foo';
- return doc;
- };
+const nestarr = (depth) => {
+ let doc = [0];
+ let cur = doc;
+ for (let i = 0; i < depth; i++) {
+ cur[0] = [0];
+ cur = cur[0];
+ }
+ cur[0] = 'foo';
+ return doc;
+};
- assert.doesNotThrow(
- tojson, [nestobj(tooMuchRecursion)], 'failed to print deeply nested object');
- assert.doesNotThrow(tojson, [nestarr(tooMuchRecursion)], 'failed to print deeply nested array');
+assert.doesNotThrow(tojson, [nestobj(tooMuchRecursion)], 'failed to print deeply nested object');
+assert.doesNotThrow(tojson, [nestarr(tooMuchRecursion)], 'failed to print deeply nested array');
})();
diff --git a/jstests/noPassthrough/non_atomic_apply_ops_logging.js b/jstests/noPassthrough/non_atomic_apply_ops_logging.js
index 77654a81bb9..64ff7159d3a 100644
--- a/jstests/noPassthrough/non_atomic_apply_ops_logging.js
+++ b/jstests/noPassthrough/non_atomic_apply_ops_logging.js
@@ -2,79 +2,79 @@
// and atomic ops are collectively logged in applyOps.
// @tags: [requires_replication]
(function() {
- "use strict";
+"use strict";
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- let primary = rst.getPrimary();
- let testDB = primary.getDB("test");
- let oplogColl = primary.getDB("local").oplog.rs;
- let testCollName = "testColl";
- let rerenamedCollName = "rerenamedColl";
+let primary = rst.getPrimary();
+let testDB = primary.getDB("test");
+let oplogColl = primary.getDB("local").oplog.rs;
+let testCollName = "testColl";
+let rerenamedCollName = "rerenamedColl";
- testDB.runCommand({drop: testCollName});
- testDB.runCommand({drop: rerenamedCollName});
- assert.commandWorked(testDB.runCommand({create: testCollName}));
- let testColl = testDB[testCollName];
+testDB.runCommand({drop: testCollName});
+testDB.runCommand({drop: rerenamedCollName});
+assert.commandWorked(testDB.runCommand({create: testCollName}));
+let testColl = testDB[testCollName];
- // Ensure atomic apply ops logging only produces one oplog entry
- // per call to apply ops and does not log individual operations
- // separately.
- assert.commandWorked(testDB.runCommand({
- applyOps: [
- {op: "i", ns: testColl.getFullName(), o: {_id: 1, a: "foo"}},
- {op: "i", ns: testColl.getFullName(), o: {_id: 2, a: "bar"}}
- ]
- }));
- assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
- assert.eq(oplogColl.find({op: "i", ns: testColl.getFullName()}).count(), 0);
- // Ensure non-atomic apply ops logging produces an oplog entry for
- // each operation in the apply ops call and no record of applyOps
- // appears for these operations.
- assert.commandWorked(testDB.runCommand({
- applyOps: [
- {
- op: "c",
- ns: "test.$cmd",
- o: {
- renameCollection: "test.testColl",
- to: "test.renamedColl",
- stayTemp: false,
- dropTarget: false
- }
- },
- {
- op: "c",
- ns: "test.$cmd",
- o: {
- renameCollection: "test.renamedColl",
- to: "test." + rerenamedCollName,
- stayTemp: false,
- dropTarget: false
- }
+// Ensure atomic apply ops logging only produces one oplog entry
+// per call to apply ops and does not log individual operations
+// separately.
+assert.commandWorked(testDB.runCommand({
+ applyOps: [
+ {op: "i", ns: testColl.getFullName(), o: {_id: 1, a: "foo"}},
+ {op: "i", ns: testColl.getFullName(), o: {_id: 2, a: "bar"}}
+ ]
+}));
+assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
+assert.eq(oplogColl.find({op: "i", ns: testColl.getFullName()}).count(), 0);
+// Ensure non-atomic apply ops logging produces an oplog entry for
+// each operation in the apply ops call and no record of applyOps
+// appears for these operations.
+assert.commandWorked(testDB.runCommand({
+ applyOps: [
+ {
+ op: "c",
+ ns: "test.$cmd",
+ o: {
+ renameCollection: "test.testColl",
+ to: "test.renamedColl",
+ stayTemp: false,
+ dropTarget: false
}
- ]
- }));
- assert.eq(oplogColl.find({"o.renameCollection": {"$exists": true}}).count(), 2);
- assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
+ },
+ {
+ op: "c",
+ ns: "test.$cmd",
+ o: {
+ renameCollection: "test.renamedColl",
+ to: "test." + rerenamedCollName,
+ stayTemp: false,
+ dropTarget: false
+ }
+ }
+ ]
+}));
+assert.eq(oplogColl.find({"o.renameCollection": {"$exists": true}}).count(), 2);
+assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
- // Ensure that applyOps respects the 'allowAtomic' boolean flag on CRUD operations that it would
- // have applied atomically.
- assert.commandWorked(testDB.createCollection(testColl.getName()));
- assert.commandFailedWithCode(testDB.runCommand({applyOps: [], allowAtomic: 'must be boolean'}),
- ErrorCodes.TypeMismatch,
- 'allowAtomic flag must be a boolean.');
- assert.commandWorked(testDB.runCommand({
- applyOps: [
- {op: "i", ns: testColl.getFullName(), o: {_id: 3, a: "augh"}},
- {op: "i", ns: testColl.getFullName(), o: {_id: 4, a: "blah"}}
- ],
- allowAtomic: false,
- }));
- assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
- assert.eq(oplogColl.find({op: "i", ns: testColl.getFullName()}).count(), 2);
+// Ensure that applyOps respects the 'allowAtomic' boolean flag on CRUD operations that it would
+// have applied atomically.
+assert.commandWorked(testDB.createCollection(testColl.getName()));
+assert.commandFailedWithCode(testDB.runCommand({applyOps: [], allowAtomic: 'must be boolean'}),
+ ErrorCodes.TypeMismatch,
+ 'allowAtomic flag must be a boolean.');
+assert.commandWorked(testDB.runCommand({
+ applyOps: [
+ {op: "i", ns: testColl.getFullName(), o: {_id: 3, a: "augh"}},
+ {op: "i", ns: testColl.getFullName(), o: {_id: 4, a: "blah"}}
+ ],
+ allowAtomic: false,
+}));
+assert.eq(oplogColl.find({"o.applyOps": {"$exists": true}}).count(), 1);
+assert.eq(oplogColl.find({op: "i", ns: testColl.getFullName()}).count(), 2);
- rst.stopSet();
+rst.stopSet();
})();
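When debugging this test it can help to dump the oplog entries being counted; a minimal sketch, run inside the test before rst.stopSet():

    // The atomic applyOps call leaves one 'applyOps' entry; the non-atomic and
    // allowAtomic:false calls leave individual 'i' (insert) and 'c' (command) entries.
    printjson(oplogColl.find({"o.applyOps": {$exists: true}}).toArray());
    printjson(oplogColl.find({op: "i", ns: testColl.getFullName()}).toArray());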
diff --git a/jstests/noPassthrough/noncapped_oplog_creation.js b/jstests/noPassthrough/noncapped_oplog_creation.js
index 87dc37e6ed6..577074e1bb9 100644
--- a/jstests/noPassthrough/noncapped_oplog_creation.js
+++ b/jstests/noPassthrough/noncapped_oplog_creation.js
@@ -3,36 +3,36 @@
* oplog collection.
*/
(function() {
- 'use strict';
+'use strict';
- var dbpath = MongoRunner.dataPath + 'noncapped_oplog_creation';
- resetDbpath(dbpath);
+var dbpath = MongoRunner.dataPath + 'noncapped_oplog_creation';
+resetDbpath(dbpath);
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- });
- assert.neq(null, conn, 'mongod was unable to start up');
+var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+});
+assert.neq(null, conn, 'mongod was unable to start up');
- var localDB = conn.getDB('local');
+var localDB = conn.getDB('local');
- // Test that explicitly creating a non-capped oplog collection fails.
- assert.commandFailed(localDB.createCollection('oplog.fake', {capped: false}));
+// Test that explicitly creating a non-capped oplog collection fails.
+assert.commandFailed(localDB.createCollection('oplog.fake', {capped: false}));
- // Test that inserting into the replica set oplog fails when implicitly creating a non-capped
- // collection.
- assert.writeError(localDB.oplog.rs.insert({}));
+// Test that inserting into the replica set oplog fails when implicitly creating a non-capped
+// collection.
+assert.writeError(localDB.oplog.rs.insert({}));
- // Test that inserting into the master-slave oplog fails when implicitly creating a non-capped
- // collection.
- assert.commandFailed(localDB.runCommand({godinsert: 'oplog.$main', obj: {}}));
+// Test that inserting into the master-slave oplog fails when implicitly creating a non-capped
+// collection.
+assert.commandFailed(localDB.runCommand({godinsert: 'oplog.$main', obj: {}}));
- // Test that creating a non-capped oplog collection fails when using $out.
- assert.writeOK(localDB.input.insert({}));
- assert.commandFailed(localDB.runCommand({
- aggregate: 'input',
- pipeline: [{$out: 'oplog.aggregation'}],
- }));
+// Test that creating a non-capped oplog collection fails when using $out.
+assert.writeOK(localDB.input.insert({}));
+assert.commandFailed(localDB.runCommand({
+ aggregate: 'input',
+ pipeline: [{$out: 'oplog.aggregation'}],
+}));
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
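The inverse case is worth noting: the oplog namespace can be created explicitly when the collection is capped. A sketch of the expected behavior, assuming it runs before MongoRunner.stopMongod(conn); the collection name and size are arbitrary:

    // Creating an oplog collection succeeds when it is capped.
    assert.commandWorked(localDB.createCollection('oplog.fake2', {capped: true, size: 4 * 1024 * 1024}));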
diff --git a/jstests/noPassthrough/ns1.js b/jstests/noPassthrough/ns1.js
index 130ddf77db2..63c7baacb0f 100644
--- a/jstests/noPassthrough/ns1.js
+++ b/jstests/noPassthrough/ns1.js
@@ -1,51 +1,51 @@
(function() {
- "use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- let mydb = conn.getDB("test_ns1");
+"use strict";
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+let mydb = conn.getDB("test_ns1");
- const check = function(n, isNew) {
- var coll = mydb["x" + n];
- if (isNew) {
- assert.eq(0, coll.count(), "pop a: " + n);
- assert.writeOK(coll.insert({_id: n}));
- }
- assert.eq(1, coll.count(), "pop b: " + n);
- assert.eq(n, coll.findOne()._id, "pop c: " + n);
- return coll;
- };
+const check = function(n, isNew) {
+ var coll = mydb["x" + n];
+ if (isNew) {
+ assert.eq(0, coll.count(), "pop a: " + n);
+ assert.writeOK(coll.insert({_id: n}));
+ }
+ assert.eq(1, coll.count(), "pop b: " + n);
+ assert.eq(n, coll.findOne()._id, "pop c: " + n);
+ return coll;
+};
- let max = 0;
+let max = 0;
- for (; max < 1000; max++) {
- check(max, true);
- }
+for (; max < 1000; max++) {
+ check(max, true);
+}
- function checkall(removed) {
- for (var i = 0; i < max; i++) {
- if (removed == i) {
- assert.eq(0, mydb["x" + i].count(), "should be 0 : " + removed);
- } else {
- check(i, false);
- }
+function checkall(removed) {
+ for (var i = 0; i < max; i++) {
+ if (removed == i) {
+ assert.eq(0, mydb["x" + i].count(), "should be 0 : " + removed);
+ } else {
+ check(i, false);
}
}
+}
- checkall();
+checkall();
- Random.srand(123124);
- const its = max / 2;
- print("its: " + its);
- for (let i = 0; i < its; i++) {
- const x = Random.randInt(max);
- check(x, false).drop();
- checkall(x);
- check(x, true);
- if ((i + 1) % 20 == 0) {
- print(i + "/" + its);
- }
+Random.srand(123124);
+const its = max / 2;
+print("its: " + its);
+for (let i = 0; i < its; i++) {
+ const x = Random.randInt(max);
+ check(x, false).drop();
+ checkall(x);
+ check(x, true);
+ if ((i + 1) % 20 == 0) {
+ print(i + "/" + its);
}
- print("yay");
+}
+print("yay");
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js b/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
index 50e1be2c262..22fe3b12276 100644
--- a/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
+++ b/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
@@ -5,72 +5,71 @@
* @tags: [requires_journaling, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/analyze_plan.js'); // For aggPlanHasStage().
+load('jstests/libs/analyze_plan.js'); // For aggPlanHasStage().
- // Set up a 2-shard cluster.
- const st = new ShardingTest({name: jsTestName(), shards: 2, rs: {nodes: 1}});
+// Set up a 2-shard cluster.
+const st = new ShardingTest({name: jsTestName(), shards: 2, rs: {nodes: 1}});
- // Obtain a connection to the mongoS and one direct connection to each shard.
- const shard0 = st.rs0.getPrimary();
- const shard1 = st.rs1.getPrimary();
- const mongos = st.s;
+// Obtain a connection to the mongoS and one direct connection to each shard.
+const shard0 = st.rs0.getPrimary();
+const shard1 = st.rs1.getPrimary();
+const mongos = st.s;
- const configDB = mongos.getDB("config");
+const configDB = mongos.getDB("config");
- const mongosDB = mongos.getDB(jsTestName());
- const mongosColl = mongosDB.test;
+const mongosDB = mongos.getDB(jsTestName());
+const mongosColl = mongosDB.test;
- const shard0DB = shard0.getDB(jsTestName());
- const shard0Coll = shard0DB.test;
+const shard0DB = shard0.getDB(jsTestName());
+const shard0Coll = shard0DB.test;
- const shard1DB = shard1.getDB(jsTestName());
- const shard1Coll = shard1DB.test;
+const shard1DB = shard1.getDB(jsTestName());
+const shard1Coll = shard1DB.test;
- const shard1AdminDB = shard1.getDB("admin");
+const shard1AdminDB = shard1.getDB("admin");
- const shardNames = [st.rs0.name, st.rs1.name];
+const shardNames = [st.rs0.name, st.rs1.name];
- // Helper function that runs a $sample aggregation, confirms that the results are correct, and
- // verifies that the expected optimized or unoptimized $sample stage ran on each shard.
- function runSampleAndConfirmResults({sampleSize, comment, expectedPlanSummaries}) {
- // Run the aggregation via mongoS with the given 'comment' parameter.
- assert.eq(
- mongosColl.aggregate([{$sample: {size: sampleSize}}], {comment: comment}).itcount(),
- sampleSize);
+// Helper function that runs a $sample aggregation, confirms that the results are correct, and
+// verifies that the expected optimized or unoptimized $sample stage ran on each shard.
+function runSampleAndConfirmResults({sampleSize, comment, expectedPlanSummaries}) {
+ // Run the aggregation via mongoS with the given 'comment' parameter.
+ assert.eq(mongosColl.aggregate([{$sample: {size: sampleSize}}], {comment: comment}).itcount(),
+ sampleSize);
- // Obtain the explain output for the aggregation.
- const explainOut =
- assert.commandWorked(mongosColl.explain().aggregate([{$sample: {size: sampleSize}}]));
+ // Obtain the explain output for the aggregation.
+ const explainOut =
+ assert.commandWorked(mongosColl.explain().aggregate([{$sample: {size: sampleSize}}]));
- // Verify that the expected $sample stage, optimized or unoptimized, ran on each shard.
- for (let idx in expectedPlanSummaries) {
- const shardExplain = explainOut.shards[shardNames[idx]];
- for (let planSummary of expectedPlanSummaries[idx]) {
- assert(aggPlanHasStage(shardExplain, planSummary));
- }
+ // Verify that the expected $sample stage, optimized or unoptimized, ran on each shard.
+ for (let idx in expectedPlanSummaries) {
+ const shardExplain = explainOut.shards[shardNames[idx]];
+ for (let planSummary of expectedPlanSummaries[idx]) {
+ assert(aggPlanHasStage(shardExplain, planSummary));
}
}
+}
- // Enable sharding on the the test database and ensure that the primary is shard0.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
- st.ensurePrimaryShard(mongosDB.getName(), shard0.name);
+// Enable sharding on the test database and ensure that the primary is shard0.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+st.ensurePrimaryShard(mongosDB.getName(), shard0.name);
- // Shard the collection on {_id: 1}, split at {_id: 0} and move the empty upper chunk to shard1.
- st.shardColl(mongosColl.getName(), {_id: 1}, {_id: 0}, {_id: 0}, mongosDB.getName());
+// Shard the collection on {_id: 1}, split at {_id: 0} and move the empty upper chunk to shard1.
+st.shardColl(mongosColl.getName(), {_id: 1}, {_id: 0}, {_id: 0}, mongosDB.getName());
- // Write some documents to the lower chunk on shard0.
- for (let i = (-200); i < 0; ++i) {
- assert.commandWorked(mongosColl.insert({_id: i}));
- }
+// Write some documents to the lower chunk on shard0.
+for (let i = (-200); i < 0; ++i) {
+ assert.commandWorked(mongosColl.insert({_id: i}));
+}
- // Set a failpoint to hang after cloning documents to shard1 but before committing.
- shard0DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "alwaysOn"});
- shard1DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "alwaysOn"});
+// Set a failpoint to hang after cloning documents to shard1 but before committing.
+shard0DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "alwaysOn"});
+shard1DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "alwaysOn"});
- // Spawn a parallel shell to move the lower chunk from shard0 to shard1.
- const awaitMoveChunkShell = startParallelShell(`
+// Spawn a parallel shell to move the lower chunk from shard0 to shard1.
+const awaitMoveChunkShell = startParallelShell(`
assert.commandWorked(db.adminCommand({
moveChunk: "${mongosColl.getFullName()}",
find: {_id: -1},
@@ -80,75 +79,70 @@
`,
mongosDB.getMongo().port);
- // Wait until we see that all documents have been cloned to shard1.
- assert.soon(() => {
- return shard0Coll.find().itcount() === shard1Coll.find().itcount();
- });
-
- // Confirm that shard0 still owns the chunk, according to the config DB metadata.
- assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs0`}), 1);
-
- // Run a $sample aggregation without committing the chunk migration. We expect to see that the
- // optimized $sample stage was used on shard0, which own the documents. Despite the fact that
- // there are 200 documents on shard1 and we should naively have used the random-cursor
- // optimization, confirm that we instead detected that the documents were orphans and used the
- // non-optimized $sample stage.
- runSampleAndConfirmResults({
- sampleSize: 1,
- comment: "sample_with_only_orphans_on_shard1",
- expectedPlanSummaries: [["QUEUED_DATA", "MULTI_ITERATOR"], ["COLLSCAN"]]
- });
-
- // Confirm that shard0 still owns the chunk.
- assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs0`}), 1);
-
- // Release the failpoints and wait for the parallel moveChunk shell to complete.
- shard0DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "off"});
- shard1DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "off"});
- awaitMoveChunkShell();
-
- // Confirm that shard1 now owns the chunk.
- assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
-
- // Move the lower chunk back to shard0.
- assert.commandWorked(mongosDB.adminCommand({
- moveChunk: mongosColl.getFullName(),
- find: {_id: -1},
- to: shard0.name,
- waitForDelete: true
- }));
-
- // Write 1 legitimate document and 100 orphans directly to shard1, which owns the upper chunk.
- assert.eq(configDB.chunks.count({min: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
- for (let i = -100; i < 1; ++i) {
- assert.commandWorked(shard1Coll.insert({_id: i}));
- }
-
- // Confirm that there are 101 documents on shard1 and mongoS can see the 1 non-orphan.
- assert.eq(mongosColl.find({_id: {$gte: 0}}).itcount(), 1);
- assert.eq(shard1Coll.count(), 101);
-
- // Re-run the $sample aggregation. On shard1 we should again use the non-optimized stage, since
- // despite the fact that there are 101 documents present, only 1 is owned by the shard.
- runSampleAndConfirmResults({
- sampleSize: 1,
- comment: "sample_with_1_doc_100_orphans_on_shard1",
- expectedPlanSummaries: [["QUEUED_DATA", "MULTI_ITERATOR"], ["COLLSCAN"]]
- });
-
- // Write 199 additional documents to the upper chunk which still resides on shard1.
- assert.eq(configDB.chunks.count({min: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
- for (let i = 1; i < 200; ++i) {
- assert.commandWorked(mongosColl.insert({_id: i}));
- }
-
- // Re-run the $sample aggregation and confirm that the optimized stage now runs on both shards.
- runSampleAndConfirmResults({
- sampleSize: 1,
- comment: "sample_with_200_docs_100_orphans_on_shard1",
- expectedPlanSummaries:
- [["QUEUED_DATA", "MULTI_ITERATOR"], ["QUEUED_DATA", "MULTI_ITERATOR"]]
- });
-
- st.stop();
+// Wait until we see that all documents have been cloned to shard1.
+assert.soon(() => {
+ return shard0Coll.find().itcount() === shard1Coll.find().itcount();
+});
+
+// Confirm that shard0 still owns the chunk, according to the config DB metadata.
+assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs0`}), 1);
+
+// Run a $sample aggregation without committing the chunk migration. We expect to see that the
+// optimized $sample stage was used on shard0, which owns the documents. Despite the fact that
+// there are 200 documents on shard1 and we should naively have used the random-cursor
+// optimization, confirm that we instead detected that the documents were orphans and used the
+// non-optimized $sample stage.
+runSampleAndConfirmResults({
+ sampleSize: 1,
+ comment: "sample_with_only_orphans_on_shard1",
+ expectedPlanSummaries: [["QUEUED_DATA", "MULTI_ITERATOR"], ["COLLSCAN"]]
+});
+
+// Confirm that shard0 still owns the chunk.
+assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs0`}), 1);
+
+// Release the failpoints and wait for the parallel moveChunk shell to complete.
+shard0DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "off"});
+shard1DB.adminCommand({configureFailPoint: "moveChunkHangAtStep4", mode: "off"});
+awaitMoveChunkShell();
+
+// Confirm that shard1 now owns the chunk.
+assert.eq(configDB.chunks.count({max: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
+
+// Move the lower chunk back to shard0.
+assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: -1}, to: shard0.name, waitForDelete: true}));
+
+// Write 1 legitimate document and 100 orphans directly to shard1, which owns the upper chunk.
+assert.eq(configDB.chunks.count({min: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
+for (let i = -100; i < 1; ++i) {
+ assert.commandWorked(shard1Coll.insert({_id: i}));
+}
+
+// Confirm that there are 101 documents on shard1 and mongoS can see the 1 non-orphan.
+assert.eq(mongosColl.find({_id: {$gte: 0}}).itcount(), 1);
+assert.eq(shard1Coll.count(), 101);
+
+// Re-run the $sample aggregation. On shard1 we should again use the non-optimized stage, since
+// despite the fact that there are 101 documents present, only 1 is owned by the shard.
+runSampleAndConfirmResults({
+ sampleSize: 1,
+ comment: "sample_with_1_doc_100_orphans_on_shard1",
+ expectedPlanSummaries: [["QUEUED_DATA", "MULTI_ITERATOR"], ["COLLSCAN"]]
+});
+
+// Write 199 additional documents to the upper chunk which still resides on shard1.
+assert.eq(configDB.chunks.count({min: {_id: 0}, shard: `${jsTestName()}-rs1`}), 1);
+for (let i = 1; i < 200; ++i) {
+ assert.commandWorked(mongosColl.insert({_id: i}));
+}
+
+// Re-run the $sample aggregation and confirm that the optimized stage now runs on both shards.
+runSampleAndConfirmResults({
+ sampleSize: 1,
+ comment: "sample_with_200_docs_100_orphans_on_shard1",
+ expectedPlanSummaries: [["QUEUED_DATA", "MULTI_ITERATOR"], ["QUEUED_DATA", "MULTI_ITERATOR"]]
+});
+
+st.stop();
})();
\ No newline at end of file
diff --git a/jstests/noPassthrough/out_majority_read_replset.js b/jstests/noPassthrough/out_majority_read_replset.js
index 6452a8c93f9..222bc3a0503 100644
--- a/jstests/noPassthrough/out_majority_read_replset.js
+++ b/jstests/noPassthrough/out_majority_read_replset.js
@@ -1,44 +1,44 @@
// Tests the $out and read concern majority.
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries.
- const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
+const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ rst.stopSet();
+ return;
+}
- rst.initiate();
+rst.initiate();
- const name = "out_majority_read";
- const db = rst.getPrimary().getDB(name);
+const name = "out_majority_read";
+const db = rst.getPrimary().getDB(name);
- const sourceColl = db.sourceColl;
+const sourceColl = db.sourceColl;
- assert.commandWorked(sourceColl.insert({_id: 1, state: 'before'}));
- rst.awaitLastOpCommitted();
+assert.commandWorked(sourceColl.insert({_id: 1, state: 'before'}));
+rst.awaitLastOpCommitted();
- stopReplicationOnSecondaries(rst);
+stopReplicationOnSecondaries(rst);
- // Create the index that is not majority commited
- assert.commandWorked(sourceColl.createIndex({state: 1}, {name: "secondIndex"}));
+// Create the index that is not majority committed.
+assert.commandWorked(sourceColl.createIndex({state: 1}, {name: "secondIndex"}));
- // Run the $out in the parallel shell as it will block in the metadata until the shapshot is
- // advanced.
- const awaitShell = startParallelShell(`{
+// Run the $out in a parallel shell, as it will block in the metadata until the snapshot is
+// advanced.
+const awaitShell = startParallelShell(`{
const testDB = db.getSiblingDB("${name}");
const sourceColl = testDB.sourceColl;
@@ -55,17 +55,17 @@
}`,
db.getMongo().port);
- // Wait for the $out before restarting the replication.
- assert.soon(function() {
- const filter = {"command.aggregate": "sourceColl"};
- return assert.commandWorked(db.currentOp(filter)).inprog.length === 1;
- });
+// Wait for the $out before restarting the replication.
+assert.soon(function() {
+ const filter = {"command.aggregate": "sourceColl"};
+ return assert.commandWorked(db.currentOp(filter)).inprog.length === 1;
+});
- // Restart data replicaiton and wait until the new write becomes visible.
- restartReplicationOnSecondaries(rst);
- rst.awaitLastOpCommitted();
+// Restart data replication and wait until the new write becomes visible.
+restartReplicationOnSecondaries(rst);
+rst.awaitLastOpCommitted();
- awaitShell();
+awaitShell();
- rst.stopSet();
+rst.stopSet();
}());
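The operation launched in the parallel shell is, at its core, an $out run at majority read concern; stripped of the failpoint choreography it looks like this (a sketch with a hypothetical target collection name):

    // Reads only majority-committed documents from sourceColl and writes them to 'out'.
    sourceColl.aggregate([{$out: "out"}], {readConcern: {level: "majority"}});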
diff --git a/jstests/noPassthrough/out_max_time_ms.js b/jstests/noPassthrough/out_max_time_ms.js
index 578ab60a6e2..29c00b6834f 100644
--- a/jstests/noPassthrough/out_max_time_ms.js
+++ b/jstests/noPassthrough/out_max_time_ms.js
@@ -3,126 +3,125 @@
* @tags: [requires_sharding, requires_replication]
*/
(function() {
- load("jstests/libs/fixture_helpers.js"); // For isMongos().
- load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
-
- const kDBName = "test";
- const kSourceCollName = "out_max_time_ms_source";
- const kDestCollName = "out_max_time_ms_dest";
- const nDocs = 10;
-
- /**
- * Helper for populating the collection.
- */
- function insertDocs(coll) {
- for (let i = 0; i < nDocs; i++) {
- assert.commandWorked(coll.insert({_id: i}));
- }
- }
+load("jstests/libs/fixture_helpers.js"); // For isMongos().
+load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow.
- /**
- * Wait until the server sets its CurOp "msg" to the failpoint name, indicating that it's
- * hanging.
- */
- function waitUntilServerHangsOnFailPoint(conn, fpName) {
- // Be sure that the server is hanging on the failpoint.
- assert.soon(function() {
- const filter = {"msg": fpName};
- const ops = conn.getDB("admin")
- .aggregate([{$currentOp: {allUsers: true}}, {$match: filter}])
- .toArray();
- return ops.length == 1;
- });
- }
+const kDBName = "test";
+const kSourceCollName = "out_max_time_ms_source";
+const kDestCollName = "out_max_time_ms_dest";
+const nDocs = 10;
- /**
- * Given a mongod connection, run a $out aggregation against 'conn' which hangs on the given
- * failpoint and ensure that the $out maxTimeMS expires.
- */
- function forceAggregationToHangAndCheckMaxTimeMsExpires(conn, failPointName) {
- // Use a short maxTimeMS so that the test completes in a reasonable amount of time. We will
- // use the 'maxTimeNeverTimeOut' failpoint to ensure that the operation does not prematurely
- // time out.
- const maxTimeMS = 1000 * 2;
-
- // Enable a failPoint so that the write will hang.
- let failpointCommand = {
- configureFailPoint: failPointName,
- mode: "alwaysOn",
- };
-
- assert.commandWorked(conn.getDB("admin").runCommand(failpointCommand));
-
- // Make sure we don't run out of time before the failpoint is hit.
- assert.commandWorked(conn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
-
- // Build the parallel shell function.
- let shellStr = `const sourceColl = db['${kSourceCollName}'];`;
- shellStr += `const destColl = db['${kDestCollName}'];`;
- shellStr += `const maxTimeMS = ${maxTimeMS};`;
- const runAggregate = function() {
- const pipeline = [{$out: destColl.getName()}];
- const err = assert.throws(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
- assert.eq(err.code, ErrorCodes.MaxTimeMSExpired, "expected aggregation to fail");
- };
- shellStr += `(${runAggregate.toString()})();`;
- const awaitShell = startParallelShell(shellStr, conn.port);
-
- waitUntilServerHangsOnFailPoint(conn, failPointName);
-
- assert.commandWorked(conn.getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
-
- // The aggregation running in the parallel shell will hang on the failpoint, burning
- // its time. Wait until the maxTimeMS has definitely expired.
- sleep(maxTimeMS + 2000);
-
- // Now drop the failpoint, allowing the aggregation to proceed. It should hit an
- // interrupt check and terminate immediately.
- assert.commandWorked(
- conn.getDB("admin").runCommand({configureFailPoint: failPointName, mode: "off"}));
-
- // Wait for the parallel shell to finish.
- assert.eq(awaitShell(), 0);
+/**
+ * Helper for populating the collection.
+ */
+function insertDocs(coll) {
+ for (let i = 0; i < nDocs; i++) {
+ assert.commandWorked(coll.insert({_id: i}));
}
+}
- function runUnshardedTest(conn) {
- jsTestLog("Running unsharded test");
-
- const sourceColl = conn.getDB(kDBName)[kSourceCollName];
- const destColl = conn.getDB(kDBName)[kDestCollName];
- assert.commandWorked(destColl.remove({}));
+/**
+ * Wait until the server sets its CurOp "msg" to the failpoint name, indicating that it's
+ * hanging.
+ */
+function waitUntilServerHangsOnFailPoint(conn, fpName) {
+ // Be sure that the server is hanging on the failpoint.
+ assert.soon(function() {
+ const filter = {"msg": fpName};
+ const ops = conn.getDB("admin")
+ .aggregate([{$currentOp: {allUsers: true}}, {$match: filter}])
+ .toArray();
+ return ops.length == 1;
+ });
+}
- // Be sure we're able to read from a cursor with a maxTimeMS set on it.
- (function() {
- // Use a long maxTimeMS, since we expect the operation to finish.
- const maxTimeMS = 1000 * 600;
- const pipeline = [{$out: destColl.getName()}];
- const cursor = sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS});
- assert(!cursor.hasNext());
- assert.eq(destColl.countDocuments({_id: {$exists: true}}), nDocs);
- })();
+/**
+ * Given a mongod connection, run a $out aggregation against 'conn' which hangs on the given
+ * failpoint and ensure that the $out maxTimeMS expires.
+ */
+function forceAggregationToHangAndCheckMaxTimeMsExpires(conn, failPointName) {
+ // Use a short maxTimeMS so that the test completes in a reasonable amount of time. We will
+ // use the 'maxTimeNeverTimeOut' failpoint to ensure that the operation does not prematurely
+ // time out.
+ const maxTimeMS = 1000 * 2;
+
+ // Enable a failPoint so that the write will hang.
+ let failpointCommand = {
+ configureFailPoint: failPointName,
+ mode: "alwaysOn",
+ };
+
+ assert.commandWorked(conn.getDB("admin").runCommand(failpointCommand));
+
+ // Make sure we don't run out of time before the failpoint is hit.
+ assert.commandWorked(conn.getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}));
+
+ // Build the parallel shell function.
+ let shellStr = `const sourceColl = db['${kSourceCollName}'];`;
+ shellStr += `const destColl = db['${kDestCollName}'];`;
+ shellStr += `const maxTimeMS = ${maxTimeMS};`;
+ const runAggregate = function() {
+ const pipeline = [{$out: destColl.getName()}];
+ const err = assert.throws(() => sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS}));
+ assert.eq(err.code, ErrorCodes.MaxTimeMSExpired, "expected aggregation to fail");
+ };
+ shellStr += `(${runAggregate.toString()})();`;
+ const awaitShell = startParallelShell(shellStr, conn.port);
+
+ waitUntilServerHangsOnFailPoint(conn, failPointName);
+
+ assert.commandWorked(
+ conn.getDB("admin").runCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}));
+
+ // The aggregation running in the parallel shell will hang on the failpoint, burning
+ // its time. Wait until the maxTimeMS has definitely expired.
+ sleep(maxTimeMS + 2000);
+
+ // Now drop the failpoint, allowing the aggregation to proceed. It should hit an
+ // interrupt check and terminate immediately.
+ assert.commandWorked(
+ conn.getDB("admin").runCommand({configureFailPoint: failPointName, mode: "off"}));
+
+ // Wait for the parallel shell to finish.
+ assert.eq(awaitShell(), 0);
+}
+
+function runUnshardedTest(conn) {
+ jsTestLog("Running unsharded test");
+
+ const sourceColl = conn.getDB(kDBName)[kSourceCollName];
+ const destColl = conn.getDB(kDBName)[kDestCollName];
+ assert.commandWorked(destColl.remove({}));
+
+ // Be sure we're able to read from a cursor with a maxTimeMS set on it.
+ (function() {
+ // Use a long maxTimeMS, since we expect the operation to finish.
+ const maxTimeMS = 1000 * 600;
+ const pipeline = [{$out: destColl.getName()}];
+ const cursor = sourceColl.aggregate(pipeline, {maxTimeMS: maxTimeMS});
+ assert(!cursor.hasNext());
+ assert.eq(destColl.countDocuments({_id: {$exists: true}}), nDocs);
+ })();
- assert.commandWorked(destColl.remove({}));
+ assert.commandWorked(destColl.remove({}));
- // Force the aggregation to hang while the batch is being written.
- const kFailPointName = "hangDuringBatchInsert";
- forceAggregationToHangAndCheckMaxTimeMsExpires(conn, kFailPointName);
+ // Force the aggregation to hang while the batch is being written.
+ const kFailPointName = "hangDuringBatchInsert";
+ forceAggregationToHangAndCheckMaxTimeMsExpires(conn, kFailPointName);
- assert.commandWorked(destColl.remove({}));
+ assert.commandWorked(destColl.remove({}));
- // Force the aggregation to hang while the batch is being built.
- forceAggregationToHangAndCheckMaxTimeMsExpires(conn,
- "hangWhileBuildingDocumentSourceOutBatch");
- }
+ // Force the aggregation to hang while the batch is being built.
+ forceAggregationToHangAndCheckMaxTimeMsExpires(conn, "hangWhileBuildingDocumentSourceOutBatch");
+}
- // Run on a standalone.
- (function() {
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, 'mongod was unable to start up');
- insertDocs(conn.getDB(kDBName)[kSourceCollName]);
- runUnshardedTest(conn);
- MongoRunner.stopMongod(conn);
- })();
+// Run on a standalone.
+(function() {
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, 'mongod was unable to start up');
+insertDocs(conn.getDB(kDBName)[kSourceCollName]);
+runUnshardedTest(conn);
+MongoRunner.stopMongod(conn);
+})();
})();
diff --git a/jstests/noPassthrough/out_merge_majority_read.js b/jstests/noPassthrough/out_merge_majority_read.js
index 125219841b3..a525a192a42 100644
--- a/jstests/noPassthrough/out_merge_majority_read.js
+++ b/jstests/noPassthrough/out_merge_majority_read.js
@@ -8,86 +8,86 @@
*/
(function() {
- 'use strict';
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
-
- const testServer = MongoRunner.runMongod();
- const db = testServer.getDB("test");
- if (!db.serverStatus().storageEngine.supportsCommittedReads) {
- print("Skipping read_majority.js since storageEngine doesn't support it.");
- MongoRunner.stopMongod(testServer);
- return;
- }
+'use strict';
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+const testServer = MongoRunner.runMongod();
+const db = testServer.getDB("test");
+if (!db.serverStatus().storageEngine.supportsCommittedReads) {
+ print("Skipping read_majority.js since storageEngine doesn't support it.");
MongoRunner.stopMongod(testServer);
+ return;
+}
+MongoRunner.stopMongod(testServer);
- function runTests(sourceColl, mongodConnection) {
- function makeSnapshot() {
- return assert.commandWorked(mongodConnection.adminCommand("makeSnapshot")).name;
- }
- function setCommittedSnapshot(snapshot) {
- assert.commandWorked(mongodConnection.adminCommand({"setCommittedSnapshot": snapshot}));
- }
- const db = sourceColl.getDB();
- const targetColl = db.targetColl;
- const targetReplaceDocsColl = db.targetReplaceDocsColl;
-
- assert.commandWorked(sourceColl.remove({}));
- assert.commandWorked(targetColl.remove({}));
- assert.commandWorked(targetReplaceDocsColl.remove({}));
- setCommittedSnapshot(makeSnapshot());
-
- // Insert a single document and make it visible by advancing the snapshot.
- assert.commandWorked(sourceColl.insert({_id: 1, state: 'before'}));
- assert.commandWorked(targetReplaceDocsColl.insert({_id: 1, state: 'before'}));
- setCommittedSnapshot(makeSnapshot());
-
- // This insert will not be visible to $merge.
- assert.commandWorked(sourceColl.insert({_id: 2, state: 'before'}));
- assert.commandWorked(targetReplaceDocsColl.insert({_id: 2, state: 'before'}));
- // Similarly this update will not be visible.
- assert.commandWorked(sourceColl.update({_id: 1}, {state: 'after'}));
- assert.commandWorked(targetReplaceDocsColl.update({_id: 1}, {state: 'after'}));
-
- // Make sure we see only the first document.
- let res = sourceColl.aggregate([], {readConcern: {level: 'majority'}});
- assert.eq(res.itcount(), 1);
-
- // Run $merge with whenNotMatched set to "insert". It will pick only the first document.
- // Also it will not see the update ('after').
- res = sourceColl.aggregate(
- [
- {$match: {state: 'before'}},
- {$project: {state: 'merge'}},
- {
+function runTests(sourceColl, mongodConnection) {
+ function makeSnapshot() {
+ return assert.commandWorked(mongodConnection.adminCommand("makeSnapshot")).name;
+ }
+ function setCommittedSnapshot(snapshot) {
+ assert.commandWorked(mongodConnection.adminCommand({"setCommittedSnapshot": snapshot}));
+ }
+ const db = sourceColl.getDB();
+ const targetColl = db.targetColl;
+ const targetReplaceDocsColl = db.targetReplaceDocsColl;
+
+ assert.commandWorked(sourceColl.remove({}));
+ assert.commandWorked(targetColl.remove({}));
+ assert.commandWorked(targetReplaceDocsColl.remove({}));
+ setCommittedSnapshot(makeSnapshot());
+
+ // Insert a single document and make it visible by advancing the snapshot.
+ assert.commandWorked(sourceColl.insert({_id: 1, state: 'before'}));
+ assert.commandWorked(targetReplaceDocsColl.insert({_id: 1, state: 'before'}));
+ setCommittedSnapshot(makeSnapshot());
+
+ // This insert will not be visible to $merge.
+ assert.commandWorked(sourceColl.insert({_id: 2, state: 'before'}));
+ assert.commandWorked(targetReplaceDocsColl.insert({_id: 2, state: 'before'}));
+ // Similarly this update will not be visible.
+ assert.commandWorked(sourceColl.update({_id: 1}, {state: 'after'}));
+ assert.commandWorked(targetReplaceDocsColl.update({_id: 1}, {state: 'after'}));
+
+ // Make sure we see only the first document.
+ let res = sourceColl.aggregate([], {readConcern: {level: 'majority'}});
+ assert.eq(res.itcount(), 1);
+
+ // Run $merge with whenNotMatched set to "insert". It will pick only the first document.
+ // Also it will not see the update ('after').
+ res = sourceColl.aggregate(
+ [
+ {$match: {state: 'before'}},
+ {$project: {state: 'merge'}},
+ {
$merge: {
into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
whenMatched: "fail",
whenNotMatched: "insert"
}
- }
- ],
- {readConcern: {level: 'majority'}});
-
- assert.eq(res.itcount(), 0);
-
- res = targetColl.find().sort({_id: 1});
- // Only a single document is visible ($merge did not see the second insert).
- assert.docEq(res.next(), {_id: 1, state: 'merge'});
- assert(res.isExhausted());
-
- // The same $merge but with whenMatched set to "replace".
- res = sourceColl.aggregate(
- [
- {$match: {state: 'before'}},
- {$project: {state: 'merge'}},
- {
+ }
+ ],
+ {readConcern: {level: 'majority'}});
+
+ assert.eq(res.itcount(), 0);
+
+ res = targetColl.find().sort({_id: 1});
+ // Only a single document is visible ($merge did not see the second insert).
+ assert.docEq(res.next(), {_id: 1, state: 'merge'});
+ assert(res.isExhausted());
+
+ // The same $merge but with whenMatched set to "replace".
+ res = sourceColl.aggregate(
+ [
+ {$match: {state: 'before'}},
+ {$project: {state: 'merge'}},
+ {
$merge: {
into: {
db: targetReplaceDocsColl.getDB().getName(),
@@ -96,120 +96,120 @@
whenMatched: "replace",
whenNotMatched: "insert"
}
- }
- ],
- {readConcern: {level: 'majority'}});
- assert.eq(res.itcount(), 0);
-
- setCommittedSnapshot(makeSnapshot());
-
- res = targetReplaceDocsColl.find().sort({_id: 1});
- // The first document must overwrite the update that the read portion of $merge did not see.
- assert.docEq(res.next(), {_id: 1, state: 'merge'});
- // The second document is the result of the independent insert that $merge did not see.
- assert.docEq(res.next(), {_id: 2, state: 'before'});
- assert(res.isExhausted());
-
- assert.commandWorked(targetColl.remove({}));
- setCommittedSnapshot(makeSnapshot());
-
- // Insert a document that will collide with $merge insert. The insert is not majority
- // commited.
- assert.commandWorked(targetColl.insert({_id: 1, state: 'collision'}));
-
- res = db.runCommand({
- aggregate: sourceColl.getName(),
- pipeline: [
- {$project: {state: 'merge'}},
- {
- $merge: {
- into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
- whenMatched: "fail",
- whenNotMatched: "insert"
- }
- }
- ],
- cursor: {},
- readConcern: {level: 'majority'}
- });
-
- assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
-
- // Remove the documents (not majority).
- assert.commandWorked(targetColl.remove({_id: 1}));
- assert.commandWorked(targetColl.remove({_id: 2}));
-
- // $merge should successfuly 'overwrite' the collection as it is 'empty' (not majority).
- res = targetReplaceDocsColl.aggregate(
- [
- {$match: {state: 'before'}},
- {$project: {state: 'merge'}},
- {
+ }
+ ],
+ {readConcern: {level: 'majority'}});
+ assert.eq(res.itcount(), 0);
+
+ setCommittedSnapshot(makeSnapshot());
+
+ res = targetReplaceDocsColl.find().sort({_id: 1});
+ // The first document must overwrite the update that the read portion of $merge did not see.
+ assert.docEq(res.next(), {_id: 1, state: 'merge'});
+ // The second document is the result of the independent insert that $merge did not see.
+ assert.docEq(res.next(), {_id: 2, state: 'before'});
+ assert(res.isExhausted());
+
+ assert.commandWorked(targetColl.remove({}));
+ setCommittedSnapshot(makeSnapshot());
+
+ // Insert a document that will collide with $merge insert. The insert is not majority
+    // committed.
+ assert.commandWorked(targetColl.insert({_id: 1, state: 'collision'}));
+
+ res = db.runCommand({
+ aggregate: sourceColl.getName(),
+ pipeline: [
+ {$project: {state: 'merge'}},
+ {
$merge: {
into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
whenMatched: "fail",
whenNotMatched: "insert"
}
- }
- ],
- {readConcern: {level: 'majority'}});
+ }
+ ],
+ cursor: {},
+ readConcern: {level: 'majority'}
+ });
- assert.eq(res.itcount(), 0);
+ assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey);
- setCommittedSnapshot(makeSnapshot());
+ // Remove the documents (not majority).
+ assert.commandWorked(targetColl.remove({_id: 1}));
+ assert.commandWorked(targetColl.remove({_id: 2}));
- res = targetColl.find().sort({_id: 1});
- // Only a single document is visible ($merge did not see the second insert).
- assert.docEq(res.next(), {_id: 2, state: 'merge'});
- assert(res.isExhausted());
+    // $merge should successfully 'overwrite' the collection as it is 'empty' (not majority).
+ res = targetReplaceDocsColl.aggregate(
+ [
+ {$match: {state: 'before'}},
+ {$project: {state: 'merge'}},
+ {
+ $merge: {
+ into: {db: targetColl.getDB().getName(), coll: targetColl.getName()},
+ whenMatched: "fail",
+ whenNotMatched: "insert"
+ }
+ }
+ ],
+ {readConcern: {level: 'majority'}});
+
+ assert.eq(res.itcount(), 0);
+
+ setCommittedSnapshot(makeSnapshot());
+
+ res = targetColl.find().sort({_id: 1});
+ // Only a single document is visible ($merge did not see the second insert).
+ assert.docEq(res.next(), {_id: 2, state: 'merge'});
+ assert(res.isExhausted());
+}
+
+const replTest = new ReplSetTest({
+ nodes: 1,
+ oplogSize: 2,
+ nodeOptions: {
+ setParameter: 'testingSnapshotBehaviorInIsolation=true',
+ enableMajorityReadConcern: '',
+ shardsvr: ''
}
+});
+replTest.startSet();
+// Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation' set.
+replTest.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
- const replTest = new ReplSetTest({
- nodes: 1,
- oplogSize: 2,
- nodeOptions: {
- setParameter: 'testingSnapshotBehaviorInIsolation=true',
- enableMajorityReadConcern: '',
- shardsvr: ''
- }
- });
- replTest.startSet();
- // Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation' set.
- replTest.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+const mongod = replTest.getPrimary();
- const mongod = replTest.getPrimary();
+(function testSingleNode() {
+ const db = mongod.getDB("singleNode");
+ runTests(db.collection, mongod);
+})();
- (function testSingleNode() {
- const db = mongod.getDB("singleNode");
- runTests(db.collection, mongod);
- })();
+const shardingTest = new ShardingTest({
+ shards: 0,
+ mongos: 1,
+});
+assert(shardingTest.adminCommand({addShard: replTest.getURL()}));
- const shardingTest = new ShardingTest({
- shards: 0,
- mongos: 1,
- });
- assert(shardingTest.adminCommand({addShard: replTest.getURL()}));
-
- (function testUnshardedDBThroughMongos() {
- const db = shardingTest.getDB("throughMongos");
- runTests(db.unshardedDB, mongod);
- })();
+(function testUnshardedDBThroughMongos() {
+ const db = shardingTest.getDB("throughMongos");
+ runTests(db.unshardedDB, mongod);
+})();
- shardingTest.adminCommand({enableSharding: 'throughMongos'});
+shardingTest.adminCommand({enableSharding: 'throughMongos'});
- (function testUnshardedCollectionThroughMongos() {
- const db = shardingTest.getDB("throughMongos");
- runTests(db.unshardedCollection, mongod);
- })();
+(function testUnshardedCollectionThroughMongos() {
+ const db = shardingTest.getDB("throughMongos");
+ runTests(db.unshardedCollection, mongod);
+})();
- (function testShardedCollectionThroughMongos() {
- const db = shardingTest.getDB("throughMongos");
- const collection = db.shardedCollection;
- shardingTest.adminCommand({shardCollection: collection.getFullName(), key: {_id: 1}});
- runTests(collection, mongod);
- })();
+(function testShardedCollectionThroughMongos() {
+ const db = shardingTest.getDB("throughMongos");
+ const collection = db.shardedCollection;
+ shardingTest.adminCommand({shardCollection: collection.getFullName(), key: {_id: 1}});
+ runTests(collection, mongod);
+})();
- shardingTest.stop();
- replTest.stopSet();
+shardingTest.stop();
+replTest.stopSet();
})();
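For readers less familiar with $merge, the stage exercised throughout runTests() has this general shape (a sketch with hypothetical database and collection names):

    // Insert unmatched documents into test.target; fail the aggregation if a
    // document with the same _id already exists there.
    db.source.aggregate([
        {$project: {state: 'merge'}},
        {
            $merge: {
                into: {db: "test", coll: "target"},
                whenMatched: "fail",
                whenNotMatched: "insert"
            }
        }
    ]);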
diff --git a/jstests/noPassthrough/parse_zone_info.js b/jstests/noPassthrough/parse_zone_info.js
index e8336f121da..c254d9d966d 100644
--- a/jstests/noPassthrough/parse_zone_info.js
+++ b/jstests/noPassthrough/parse_zone_info.js
@@ -1,20 +1,20 @@
// Tests the parsing of the timeZoneInfo parameter.
(function() {
- // Test that a bad file causes startup to fail.
- let conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/bad_timezone_info"});
- assert.eq(conn, null, "expected launching mongod with bad timezone rules to fail");
- assert.neq(-1, rawMongoProgramOutput().indexOf("Fatal assertion 40475"));
+// Test that a bad file causes startup to fail.
+let conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/bad_timezone_info"});
+assert.eq(conn, null, "expected launching mongod with bad timezone rules to fail");
+assert.neq(-1, rawMongoProgramOutput().indexOf("Fatal assertion 40475"));
- // Test that a non-existent directory causes startup to fail.
- conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/missing_directory"});
- assert.eq(conn, null, "expected launching mongod with bad timezone rules to fail");
+// Test that a non-existent directory causes startup to fail.
+conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/missing_directory"});
+assert.eq(conn, null, "expected launching mongod with bad timezone rules to fail");
- // Look for either old or new error message
- assert(rawMongoProgramOutput().indexOf("Failed to create service context") != -1 ||
- rawMongoProgramOutput().indexOf("Failed global initialization") != -1);
+// Look for either old or new error message
+assert(rawMongoProgramOutput().indexOf("Failed to create service context") != -1 ||
+ rawMongoProgramOutput().indexOf("Failed global initialization") != -1);
- // Test that startup can succeed with a good file.
- conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/good_timezone_info"});
- assert.neq(conn, null, "expected launching mongod with good timezone rules to succeed");
- MongoRunner.stopMongod(conn);
+// Test that startup can succeed with a good file.
+conn = MongoRunner.runMongod({timeZoneInfo: "jstests/libs/config_files/good_timezone_info"});
+assert.neq(conn, null, "expected launching mongod with good timezone rules to succeed");
+MongoRunner.stopMongod(conn);
}());
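Once mongod starts with a usable --timeZoneInfo directory, named time zones become available to date expressions; a minimal sketch of what that enables, assuming 'conn' is a connection to such a mongod and the collection name is hypothetical:

    // $dateToString with a named timezone requires valid tz rules to be loaded.
    const tzDB = conn.getDB("tz_demo");
    assert.commandWorked(tzDB.c.insert({d: ISODate("2019-07-26T18:42:24Z")}));
    printjson(tzDB.c.aggregate([{
        $project: {
            local: {$dateToString: {date: "$d", format: "%Y-%m-%dT%H:%M:%S", timezone: "America/New_York"}}
        }
    }]).toArray());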
diff --git a/jstests/noPassthrough/partial_unique_indexes.js b/jstests/noPassthrough/partial_unique_indexes.js
index 2928790c65f..ca41fa4bb66 100644
--- a/jstests/noPassthrough/partial_unique_indexes.js
+++ b/jstests/noPassthrough/partial_unique_indexes.js
@@ -3,46 +3,44 @@
* crud operations correctly handle WriteConflictExceptions.
*/
(function() {
- "strict";
-
- let conn = MongoRunner.runMongod();
- let testDB = conn.getDB("test");
-
- let t = testDB.jstests_parallel_allops;
- t.drop();
-
- t.createIndex({x: 1, _id: 1}, {partialFilterExpression: {_id: {$lt: 500}}, unique: true});
- t.createIndex({y: -1, _id: 1}, {unique: true});
- t.createIndex({x: -1}, {partialFilterExpression: {_id: {$gte: 500}}, unique: false});
- t.createIndex({y: 1}, {unique: false});
-
- let _id = {"#RAND_INT": [0, 1000]};
- let ops = [
- {op: "remove", ns: t.getFullName(), query: {_id}},
- {op: "update", ns: t.getFullName(), query: {_id}, update: {$inc: {x: 1}}, upsert: true},
- {op: "update", ns: t.getFullName(), query: {_id}, update: {$inc: {y: 1}}, upsert: true},
- ];
-
- let seconds = 5;
- let parallel = 5;
- let host = testDB.getMongo().host;
-
- let benchArgs = {ops, seconds, parallel, host};
-
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: 'WTWriteConflictExceptionForReads',
- mode: {activationProbability: 0.01}
- }));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'WTWriteConflictException', mode: {activationProbability: 0.01}}));
- res = benchRun(benchArgs);
- printjson({res});
-
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'WTWriteConflictException', mode: "off"}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'WTWriteConflictExceptionForReads', mode: "off"}));
- res = t.validate();
- assert(res.valid, tojson(res));
- MongoRunner.stopMongod(conn);
+"strict";
+
+let conn = MongoRunner.runMongod();
+let testDB = conn.getDB("test");
+
+let t = testDB.jstests_parallel_allops;
+t.drop();
+
+t.createIndex({x: 1, _id: 1}, {partialFilterExpression: {_id: {$lt: 500}}, unique: true});
+t.createIndex({y: -1, _id: 1}, {unique: true});
+t.createIndex({x: -1}, {partialFilterExpression: {_id: {$gte: 500}}, unique: false});
+t.createIndex({y: 1}, {unique: false});
+
+let _id = {"#RAND_INT": [0, 1000]};
+let ops = [
+ {op: "remove", ns: t.getFullName(), query: {_id}},
+ {op: "update", ns: t.getFullName(), query: {_id}, update: {$inc: {x: 1}}, upsert: true},
+ {op: "update", ns: t.getFullName(), query: {_id}, update: {$inc: {y: 1}}, upsert: true},
+];
+
+let seconds = 5;
+let parallel = 5;
+let host = testDB.getMongo().host;
+
+let benchArgs = {ops, seconds, parallel, host};
+
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'WTWriteConflictExceptionForReads', mode: {activationProbability: 0.01}}));
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'WTWriteConflictException', mode: {activationProbability: 0.01}}));
+res = benchRun(benchArgs);
+printjson({res});
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'WTWriteConflictException', mode: "off"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'WTWriteConflictExceptionForReads', mode: "off"}));
+res = t.validate();
+assert(res.valid, tojson(res));
+MongoRunner.stopMongod(conn);
})();
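As background for the workload above, a partial unique index enforces uniqueness only for documents matched by the filter; a standalone sketch using a separate demo collection (hypothetical names, run before stopping mongod):

    const demo = testDB.partial_unique_demo;
    demo.drop();
    assert.commandWorked(
        demo.createIndex({a: 1}, {unique: true, partialFilterExpression: {flagged: true}}));
    assert.commandWorked(demo.insert({a: 1, flagged: true}));
    assert.writeError(demo.insert({a: 1, flagged: true}));      // in the filter: duplicate rejected
    assert.commandWorked(demo.insert({a: 1, flagged: false}));  // outside the filter: allowed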
diff --git a/jstests/noPassthrough/pipeline_optimization_failpoint.js b/jstests/noPassthrough/pipeline_optimization_failpoint.js
index af3e294bf8e..6181da559ad 100644
--- a/jstests/noPassthrough/pipeline_optimization_failpoint.js
+++ b/jstests/noPassthrough/pipeline_optimization_failpoint.js
@@ -1,51 +1,51 @@
// Tests that pipeline optimization works properly when the failpoint isn't triggered, and is
// disabled properly when it is triggered.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/analyze_plan.js"); // For aggPlan functions.
- Random.setRandomSeed();
+load("jstests/libs/analyze_plan.js"); // For aggPlan functions.
+Random.setRandomSeed();
- const conn = MongoRunner.runMongod({});
- assert.neq(conn, null, "Mongod failed to start up.");
- const testDb = conn.getDB("test");
- const coll = testDb.agg_opt;
+const conn = MongoRunner.runMongod({});
+assert.neq(conn, null, "Mongod failed to start up.");
+const testDb = conn.getDB("test");
+const coll = testDb.agg_opt;
- const pops = new Set();
- for (let i = 0; i < 25; ++i) {
- let pop;
- do {
- pop = Random.randInt(100000);
- } while (pops.has(pop));
- pops.add(pop);
+const pops = new Set();
+for (let i = 0; i < 25; ++i) {
+ let pop;
+ do {
+ pop = Random.randInt(100000);
+ } while (pops.has(pop));
+ pops.add(pop);
- assert.commandWorked(coll.insert({_id: i, city: "Cleveland", pop: pop, state: "OH"}));
- }
+ assert.commandWorked(coll.insert({_id: i, city: "Cleveland", pop: pop, state: "OH"}));
+}
- const pipeline = [{$match: {state: "OH"}}, {$sort: {pop: -1}}, {$limit: 10}];
+const pipeline = [{$match: {state: "OH"}}, {$sort: {pop: -1}}, {$limit: 10}];
- const enabledPlan = coll.explain().aggregate(pipeline);
- // Test that sort and the limit were combined.
- assert.eq(aggPlanHasStage(enabledPlan, "$limit"), false);
- assert.eq(aggPlanHasStage(enabledPlan, "$sort"), true);
- assert.eq(enabledPlan.stages.length, 2);
+const enabledPlan = coll.explain().aggregate(pipeline);
+// Test that sort and the limit were combined.
+assert.eq(aggPlanHasStage(enabledPlan, "$limit"), false);
+assert.eq(aggPlanHasStage(enabledPlan, "$sort"), true);
+assert.eq(enabledPlan.stages.length, 2);
- const enabledResult = coll.aggregate(pipeline).toArray();
+const enabledResult = coll.aggregate(pipeline).toArray();
- // Enable a failpoint that will cause pipeline optimizations to be skipped.
- assert.commandWorked(
- testDb.adminCommand({configureFailPoint: "disablePipelineOptimization", mode: "alwaysOn"}));
+// Enable a failpoint that will cause pipeline optimizations to be skipped.
+assert.commandWorked(
+ testDb.adminCommand({configureFailPoint: "disablePipelineOptimization", mode: "alwaysOn"}));
- const disabledPlan = coll.explain().aggregate(pipeline);
- // Test that the $limit still exists and hasn't been optimized away.
- assert.eq(aggPlanHasStage(disabledPlan, "$limit"), true);
- assert.eq(aggPlanHasStage(disabledPlan, "$sort"), true);
- assert.eq(disabledPlan.stages.length, 3);
+const disabledPlan = coll.explain().aggregate(pipeline);
+// Test that the $limit still exists and hasn't been optimized away.
+assert.eq(aggPlanHasStage(disabledPlan, "$limit"), true);
+assert.eq(aggPlanHasStage(disabledPlan, "$sort"), true);
+assert.eq(disabledPlan.stages.length, 3);
- const disabledResult = coll.aggregate(pipeline).toArray();
+const disabledResult = coll.aggregate(pipeline).toArray();
- // Test that the result is the same with and without optimizations enabled.
- assert.eq(enabledResult, disabledResult);
+// Test that the result is the same with and without optimizations enabled.
+assert.eq(enabledResult, disabledResult);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/plan_cache_index_create.js b/jstests/noPassthrough/plan_cache_index_create.js
index ee36f4d96eb..cc79a81bb25 100644
--- a/jstests/noPassthrough/plan_cache_index_create.js
+++ b/jstests/noPassthrough/plan_cache_index_create.js
@@ -4,159 +4,158 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- // Returns whether there is an active index build.
- function indexBuildIsRunning(testDB, indexName) {
- const indexBuildFilter = {
- "command.createIndexes": collName,
- "command.indexes.0.name": indexName,
- "msg": /^Index Build/
- };
- const curOp =
- testDB.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: indexBuildFilter}]);
- return curOp.hasNext();
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+// Returns whether there is an active index build.
+function indexBuildIsRunning(testDB, indexName) {
+ const indexBuildFilter = {
+ "command.createIndexes": collName,
+ "command.indexes.0.name": indexName,
+ "msg": /^Index Build/
+ };
+ const curOp =
+ testDB.getSiblingDB("admin").aggregate([{$currentOp: {}}, {$match: indexBuildFilter}]);
+ return curOp.hasNext();
+}
+
+// Asserts that no cached plan exists for 'query'.
+function assertDoesNotHaveCachedPlan(coll, query) {
+ const key = {query: query};
+ const cmdRes = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
+ assert(cmdRes.hasOwnProperty('plans') && cmdRes.plans.length == 0, tojson(cmdRes));
+}
+
+// Returns the index name used by the cached plan for 'query'.
+function getIndexNameForCachedPlan(coll, query) {
+ const key = {query: query};
+ const cmdRes = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
+ assert(Array.isArray(cmdRes.plans) && cmdRes.plans.length > 0, tojson(cmdRes));
+ return cmdRes.plans[0].reason.stats.inputStage.indexName;
+}
+
+function runTest({rst, readDB, writeDB}) {
+ const readColl = readDB.getCollection(collName);
+ const writeColl = writeDB.getCollection(collName);
+
+ assert.commandWorked(writeDB.runCommand({dropDatabase: 1, writeConcern: {w: "majority"}}));
+
+ const bulk = writeColl.initializeUnorderedBulkOp();
+ for (let i = 0; i < 100; ++i) {
+ bulk.insert({x: i, y: i % 10, z: 0});
}
+ assert.commandWorked(bulk.execute({w: "majority"}));
+ // We start with a baseline of 2 existing indexes as we will not cache plans when only a
+ // single plan exists.
+ assert.commandWorked(writeDB.runCommand({
+ createIndexes: collName,
+ indexes: [
+ {key: {y: 1}, name: "less_selective", background: false},
+ {key: {z: 1}, name: "least_selective", background: false}
+ ],
+ writeConcern: {w: "majority"}
+ }));
+
+ rst.waitForAllIndexBuildsToFinish(dbName, collName);
+
+ //
+ // Confirm that the plan cache is reset on start and completion of a background index build.
+ //
+
+ // Execute a find and confirm that a cached plan exists for an existing index.
+ const filter = {x: 50, y: 0, z: 0};
+ assert.eq(readColl.find(filter).itcount(), 1);
+ assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
+
+ // Enable a failpoint that will cause an index build to block just after start. This will
+ // allow us to examine PlanCache contents while index creation is in flight.
+ assert.commandWorked(
+ readDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
+
+ // Build a "most selective" index in the background.
+ TestData.dbName = dbName;
+ TestData.collName = collName;
+ const createIdxShell = startParallelShell(function() {
+ const testDB = db.getSiblingDB(TestData.dbName);
+ assert.commandWorked(testDB.runCommand({
+ createIndexes: TestData.collName,
+ indexes: [{key: {x: 1}, name: "most_selective", background: true}],
+ writeConcern: {w: "majority"}
+ }));
+ }, writeDB.getMongo().port);
- // Asserts that no cached plan exists for 'query'.
- function assertDoesNotHaveCachedPlan(coll, query) {
- const key = {query: query};
- const cmdRes = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
- assert(cmdRes.hasOwnProperty('plans') && cmdRes.plans.length == 0, tojson(cmdRes));
- }
+ // Confirm that the index build has started.
+ assert.soon(() => indexBuildIsRunning(readDB, "most_selective"),
+ "Index build operation not found after starting via parallelShell");
- // Returns the index name used by the cached plan for 'query'.
- function getIndexNameForCachedPlan(coll, query) {
- const key = {query: query};
- const cmdRes = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
- assert(Array.isArray(cmdRes.plans) && cmdRes.plans.length > 0, tojson(cmdRes));
- return cmdRes.plans[0].reason.stats.inputStage.indexName;
- }
+ // Confirm that there are no cached plans post index build start.
+ assertDoesNotHaveCachedPlan(readColl, filter);
- function runTest({rst, readDB, writeDB}) {
- const readColl = readDB.getCollection(collName);
- const writeColl = writeDB.getCollection(collName);
-
- assert.commandWorked(writeDB.runCommand({dropDatabase: 1, writeConcern: {w: "majority"}}));
-
- const bulk = writeColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 100; ++i) {
- bulk.insert({x: i, y: i % 10, z: 0});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
- // We start with a baseline of 2 existing indexes as we will not cache plans when only a
- // single plan exists.
- assert.commandWorked(writeDB.runCommand({
- createIndexes: collName,
- indexes: [
- {key: {y: 1}, name: "less_selective", background: false},
- {key: {z: 1}, name: "least_selective", background: false}
- ],
- writeConcern: {w: "majority"}
- }));
+ // Execute a find and confirm that a previously built index is the cached plan.
+ assert.eq(readColl.find(filter).itcount(), 1);
+ assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
- rst.waitForAllIndexBuildsToFinish(dbName, collName);
-
- //
- // Confirm that the plan cache is reset on start and completion of a background index build.
- //
-
- // Execute a find and confirm that a cached plan exists for an existing index.
- const filter = {x: 50, y: 0, z: 0};
- assert.eq(readColl.find(filter).itcount(), 1);
- assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
-
- // Enable a failpoint that will cause an index build to block just after start. This will
- // allow us to examine PlanCache contents while index creation is in flight.
- assert.commandWorked(readDB.adminCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
-
- // Build a "most selective" index in the background.
- TestData.dbName = dbName;
- TestData.collName = collName;
- const createIdxShell = startParallelShell(function() {
- const testDB = db.getSiblingDB(TestData.dbName);
- assert.commandWorked(testDB.runCommand({
- createIndexes: TestData.collName,
- indexes: [{key: {x: 1}, name: "most_selective", background: true}],
- writeConcern: {w: "majority"}
- }));
-
- }, writeDB.getMongo().port);
-
- // Confirm that the index build has started.
- assert.soon(() => indexBuildIsRunning(readDB, "most_selective"),
- "Index build operation not found after starting via parallelShell");
-
- // Confirm that there are no cached plans post index build start.
- assertDoesNotHaveCachedPlan(readColl, filter);
-
- // Execute a find and confirm that a previously built index is the cached plan.
- assert.eq(readColl.find(filter).itcount(), 1);
- assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
-
- // Disable the hang and wait for the index build to complete.
- assert.commandWorked(
- readDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
- assert.soon(() => !indexBuildIsRunning(readDB, "most_selective"));
- createIdxShell({checkExitSuccess: true});
-
- rst.waitForAllIndexBuildsToFinish(dbName, collName);
-
- // Confirm that there are no cached plans post index build.
- assertDoesNotHaveCachedPlan(readColl, filter);
-
- // Now that the index has been built, execute another find and confirm that the newly
- // created index is used.
- assert.eq(readColl.find(filter).itcount(), 1);
- assert.eq("most_selective", getIndexNameForCachedPlan(readColl, filter));
-
- // Drop the newly created index and confirm that the plan cache has been cleared.
- assert.commandWorked(writeDB.runCommand(
- {dropIndexes: collName, index: {x: 1}, writeConcern: {w: "majority"}}));
- assertDoesNotHaveCachedPlan(readColl, filter);
-
- //
- // Confirm that the plan cache is reset post foreground index build.
- //
-
- // Execute a find and confirm that an existing index is in the cache.
- assert.eq(readColl.find(filter).itcount(), 1);
- assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
-
- // Build a "most selective" index in the foreground.
- assert.commandWorked(writeDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {x: 1}, name: "most_selective", background: false}],
- writeConcern: {w: "majority"}
- }));
+ // Disable the hang and wait for the index build to complete.
+ assert.commandWorked(
+ readDB.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
+ assert.soon(() => !indexBuildIsRunning(readDB, "most_selective"));
+ createIdxShell({checkExitSuccess: true});
- rst.waitForAllIndexBuildsToFinish(dbName, collName);
+ rst.waitForAllIndexBuildsToFinish(dbName, collName);
- // Confirm that there are no cached plans post index build.
- assertDoesNotHaveCachedPlan(readColl, filter);
+ // Confirm that there are no cached plans post index build.
+ assertDoesNotHaveCachedPlan(readColl, filter);
- // Execute a find and confirm that the newly created index is used.
- assert.eq(readColl.find(filter).itcount(), 1);
- assert.eq("most_selective", getIndexNameForCachedPlan(readColl, filter));
+ // Now that the index has been built, execute another find and confirm that the newly
+ // created index is used.
+ assert.eq(readColl.find(filter).itcount(), 1);
+ assert.eq("most_selective", getIndexNameForCachedPlan(readColl, filter));
- // Drop the newly created index and confirm that the plan cache has been cleared.
- assert.commandWorked(writeDB.runCommand(
- {dropIndexes: collName, index: {x: 1}, writeConcern: {w: "majority"}}));
- assertDoesNotHaveCachedPlan(readColl, filter);
- }
+ // Drop the newly created index and confirm that the plan cache has been cleared.
+ assert.commandWorked(
+ writeDB.runCommand({dropIndexes: collName, index: {x: 1}, writeConcern: {w: "majority"}}));
+ assertDoesNotHaveCachedPlan(readColl, filter);
+
+ //
+ // Confirm that the plan cache is reset post foreground index build.
+ //
+
+ // Execute a find and confirm that an existing index is in the cache.
+ assert.eq(readColl.find(filter).itcount(), 1);
+ assert.eq("less_selective", getIndexNameForCachedPlan(readColl, filter));
+
+ // Build a "most selective" index in the foreground.
+ assert.commandWorked(writeDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {x: 1}, name: "most_selective", background: false}],
+ writeConcern: {w: "majority"}
+ }));
+
+ rst.waitForAllIndexBuildsToFinish(dbName, collName);
+
+ // Confirm that there are no cached plans post index build.
+ assertDoesNotHaveCachedPlan(readColl, filter);
+
+ // Execute a find and confirm that the newly created index is used.
+ assert.eq(readColl.find(filter).itcount(), 1);
+ assert.eq("most_selective", getIndexNameForCachedPlan(readColl, filter));
+
+ // Drop the newly created index and confirm that the plan cache has been cleared.
+ assert.commandWorked(
+ writeDB.runCommand({dropIndexes: collName, index: {x: 1}, writeConcern: {w: "majority"}}));
+ assertDoesNotHaveCachedPlan(readColl, filter);
+}
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
- const primaryDB = rst.getPrimary().getDB(dbName);
- const secondaryDB = rst.getSecondary().getDB(dbName);
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+const primaryDB = rst.getPrimary().getDB(dbName);
+const secondaryDB = rst.getSecondary().getDB(dbName);
- runTest({rst: rst, readDB: primaryDB, writeDB: primaryDB});
- runTest({rst: rst, readDB: secondaryDB, writeDB: primaryDB});
+runTest({rst: rst, readDB: primaryDB, writeDB: primaryDB});
+runTest({rst: rst, readDB: secondaryDB, writeDB: primaryDB});
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/plan_cache_list_plans_new_format.js b/jstests/noPassthrough/plan_cache_list_plans_new_format.js
index f8f96d56cbf..7c29a4b7cd4 100644
--- a/jstests/noPassthrough/plan_cache_list_plans_new_format.js
+++ b/jstests/noPassthrough/plan_cache_list_plans_new_format.js
@@ -1,59 +1,79 @@
// Confirms the planCacheListPlans output format.
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("jstests_plan_cache_list_plans_new_format");
- const coll = testDB.test;
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryCacheListPlansNewOutput: true}));
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+const testDB = conn.getDB("jstests_plan_cache_list_plans_new_format");
+const coll = testDB.test;
+assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, internalQueryCacheListPlansNewOutput: true}));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
- const testQuery = {"a": {"$gte": 0}, "b": 32};
- const testSort = {"c": -1};
- const testProjection = {};
+const testQuery = {
+ "a": {"$gte": 0},
+ "b": 32
+};
+const testSort = {
+ "c": -1
+};
+const testProjection = {};
- // Validate planCacheListPlans result fields for a query shape with a corresponding cache entry.
- assert.eq(0, coll.find(testQuery).sort(testSort).itcount());
- let key = {query: testQuery, sort: testSort, projection: testProjection};
- let res = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
+// Validate planCacheListPlans result fields for a query shape with a corresponding cache entry.
+assert.eq(0, coll.find(testQuery).sort(testSort).itcount());
+let key = {query: testQuery, sort: testSort, projection: testProjection};
+let res = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
- // Confirm both the existence and contents of "createdFromQuery".
- assert(res.hasOwnProperty("createdFromQuery"), `planCacheListPlans should return a result with
+// Confirm both the existence and contents of "createdFromQuery".
+assert(res.hasOwnProperty("createdFromQuery"),
+ `planCacheListPlans should return a result with
field "createdFromQuery"`);
- assert.eq(res.createdFromQuery.query, testQuery, `createdFromQuery should contain field "query"
+assert.eq(res.createdFromQuery.query,
+ testQuery,
+ `createdFromQuery should contain field "query"
with value ${testQuery}, instead got "createdFromQuery": ${res.createdFromQuery}`);
- assert.eq(res.createdFromQuery.sort, testSort, `createdFromQuery should contain field "sort"
+assert.eq(res.createdFromQuery.sort,
+ testSort,
+ `createdFromQuery should contain field "sort"
with value ${testSort}, instead got "createdFromQuery": ${res.createdFromQuery}`);
- assert.eq(res.createdFromQuery.projection, testProjection, `createdFromQuery should contain
+assert.eq(res.createdFromQuery.projection, testProjection, `createdFromQuery should contain
field "projection" with value ${testProjection}, instead got "createdFromQuery":
${res.createdFromQuery}`);
- // Confirm 'res' contains 'works' and a valid 'queryHash' field.
- assert(res.hasOwnProperty("works"), `planCacheListPlans result is missing "works" field`);
- assert.gt(res.works, 0, `planCacheListPlans result field "works" should be greater than 0`);
- assert(res.hasOwnProperty("queryHash"), `planCacheListPlans result is missing "queryHash"
+// Confirm 'res' contains 'works' and a valid 'queryHash' field.
+assert(res.hasOwnProperty("works"), `planCacheListPlans result is missing "works" field`);
+assert.gt(res.works, 0, `planCacheListPlans result field "works" should be greater than 0`);
+assert(res.hasOwnProperty("queryHash"),
+ `planCacheListPlans result is missing "queryHash"
field`);
- assert.eq(8, res.queryHash.length, `planCacheListPlans result field "queryHash" should be 8
+assert.eq(8,
+ res.queryHash.length,
+ `planCacheListPlans result field "queryHash" should be 8
characters long`);
- // Validate that 'cachedPlan' and 'creationExecStats' fields exist and both have consistent
- // information about the winning plan.
- assert(res.hasOwnProperty("cachedPlan"), `planCacheListPlans result is missing field
+// Validate that 'cachedPlan' and 'creationExecStats' fields exist and both have consistent
+// information about the winning plan.
+assert(res.hasOwnProperty("cachedPlan"),
+ `planCacheListPlans result is missing field
"cachedPlan" field`);
- assert(res.hasOwnProperty("creationExecStats"), `planCacheListPlans result is missing
+assert(res.hasOwnProperty("creationExecStats"),
+ `planCacheListPlans result is missing
"creationExecStats" field`);
- assert.gte(res.creationExecStats.length, 2, `creationExecStats should contain stats for both the
+assert.gte(res.creationExecStats.length,
+ 2,
+ `creationExecStats should contain stats for both the
winning plan and all rejected plans. Thus, should contain at least 2 elements but got:
${res.creationExecStats}`);
- let cachedStage = assert(res.cachedPlan.stage, `cachedPlan should have field "stage"`);
- let winningExecStage = assert(res.creationExecStats[0].executionStages, `creationExecStats[0]
+let cachedStage = assert(res.cachedPlan.stage, `cachedPlan should have field "stage"`);
+let winningExecStage = assert(res.creationExecStats[0].executionStages,
+ `creationExecStats[0]
should have field "executionStages"`);
- assert.eq(cachedStage, winningExecStage, `Information about the winning plan in "cachedPlan" is
+assert.eq(cachedStage,
+ winningExecStage,
+ `Information about the winning plan in "cachedPlan" is
inconsistent with the first element in "creationExecStats".`);
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/plan_cache_stats_agg_source.js b/jstests/noPassthrough/plan_cache_stats_agg_source.js
index cee1aa15907..bd90c0e4942 100644
--- a/jstests/noPassthrough/plan_cache_stats_agg_source.js
+++ b/jstests/noPassthrough/plan_cache_stats_agg_source.js
@@ -2,175 +2,167 @@
* Tests for the $planCacheStats aggregation metadata source.
*/
(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js");
-
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start up");
-
- const testDb = conn.getDB("test");
- const coll = testDb.plan_cache_stats_agg_source;
-
- // Returns a BSON object representing the plan cache entry for the query shape {a: 1, b: 1}.
- function getSingleEntryStats() {
- const cursor = coll.aggregate(
- [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}]);
- assert(cursor.hasNext());
- const entryStats = cursor.next();
- assert(!cursor.hasNext());
- return entryStats;
- }
-
- // Fails when the collection does not exist.
- assert.commandFailedWithCode(
- testDb.runCommand(
- {aggregate: coll.getName(), pipeline: [{$planCacheStats: {}}], cursor: {}}),
- 50933);
-
- // Create a collection with two indices.
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.createIndex({b: 1}));
-
- // Should return an empty result set when there are no cache entries yet.
- assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount());
-
- // Run three distinct query shapes and check that there are three cache entries.
- assert.eq(0, coll.find({a: 1, b: 1}).itcount());
- assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
- assert.eq(0, coll.find({a: 1, b: 1, d: 1}).itcount());
- assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
-
- // We should be able to find particular cache entries by matching on the query from which the
- // entry was created.
- assert.eq(
- 1,
- coll.aggregate([{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}])
- .itcount());
- assert.eq(
- 1,
- coll.aggregate(
- [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, c: 1}}}])
- .itcount());
- assert.eq(
- 1,
- coll.aggregate(
- [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, d: 1}}}])
- .itcount());
-
- // A similar match on a query filter that was never run should turn up nothing.
- assert.eq(
- 0,
- coll.aggregate(
- [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, e: 1}}}])
- .itcount());
-
- // Test $group over the plan cache metadata.
- assert.eq(1,
- coll.aggregate([{$planCacheStats: {}}, {$group: {_id: "$createdFromQuery.query.a"}}])
- .itcount());
-
- // Explain should show that a $match gets absorbed into the $planCacheStats stage.
- let explain = assert.commandWorked(coll.explain().aggregate(
- [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}]));
- assert.eq(explain.stages.length, 1);
- const planCacheStatsExplain = getAggPlanStage(explain, "$planCacheStats");
- assert.neq(planCacheStatsExplain, null);
- assert(planCacheStatsExplain.hasOwnProperty("$planCacheStats"));
- assert(planCacheStatsExplain.$planCacheStats.hasOwnProperty("match"));
- assert.eq(planCacheStatsExplain.$planCacheStats.match,
- {"createdFromQuery.query": {a: 1, b: 1}});
-
- // Get the plan cache metadata for a particular query.
- let entryStats = getSingleEntryStats();
-
- // Verify that the entry has the expected 'createdFromQuery' field.
- assert(entryStats.hasOwnProperty("createdFromQuery"));
- assert.eq(entryStats.createdFromQuery.query, {a: 1, b: 1});
- assert.eq(entryStats.createdFromQuery.sort, {});
- assert.eq(entryStats.createdFromQuery.projection, {});
- assert(!entryStats.createdFromQuery.hasOwnProperty("collation"));
-
- // Verify that $planCacheStats reports the same 'queryHash' and 'planCacheKey' as explain
- // for this query shape.
- explain = assert.commandWorked(coll.find({a: 1, b: 1}).explain());
- assert.eq(entryStats.queryHash, explain.queryPlanner.queryHash);
- assert.eq(entryStats.planCacheKey, explain.queryPlanner.planCacheKey);
-
- // Since the query shape was only run once, the plan cache entry should not be active.
- assert.eq(entryStats.isActive, false);
-
- // Sanity check 'works' value.
- assert(entryStats.hasOwnProperty("works"));
- assert.gt(entryStats.works, 0);
-
- // Check that the cached plan is an index scan either on {a: 1} or {b: 1}.
- assert(entryStats.hasOwnProperty("cachedPlan"));
- const ixscanStage = getPlanStage(entryStats.cachedPlan, "IXSCAN");
- assert.neq(ixscanStage, null);
- assert(bsonWoCompare(ixscanStage.keyPattern, {a: 1}) === 0 ||
- bsonWoCompare(ixscanStage.keyPattern, {b: 1}) === 0);
-
- // Verify that the 'timeOfCreation' for the entry is now +/- one day.
- const now = new Date();
- const yesterday = (new Date()).setDate(now.getDate() - 1);
- const tomorrow = (new Date()).setDate(now.getDate() + 1);
- assert(entryStats.hasOwnProperty("timeOfCreation"));
- assert.gt(entryStats.timeOfCreation, yesterday);
- assert.lt(entryStats.timeOfCreation, tomorrow);
-
- // There should be at least two plans in 'creationExecStats', and each should have at least one
- // index scan.
- assert(entryStats.hasOwnProperty("creationExecStats"));
- assert.gte(entryStats.creationExecStats.length, 2);
- for (let plan of entryStats.creationExecStats) {
- assert(plan.hasOwnProperty("executionStages"));
- const ixscanStages = getPlanStages(plan.executionStages, "IXSCAN");
- assert.gt(ixscanStages.length, 0);
- }
-
- // Assert that the entry has an array of at least two scores, and that all scores are greater
- // than 1.
- assert(entryStats.hasOwnProperty("candidatePlanScores"));
- assert.gte(entryStats.candidatePlanScores.length, 2);
- for (let score of entryStats.candidatePlanScores) {
- assert.gt(score, 1);
- }
-
- // Should report that no index filter is set.
- assert.eq(false, entryStats.indexFilterSet);
-
- // After creating an index filter on a different query shape, $planCacheStats should still
- // report that no index filter is set. Setting a filter clears the cache, so we rerun the query
- // associated with the cache entry.
- assert.commandWorked(testDb.runCommand({
- planCacheSetFilter: coll.getName(),
- query: {a: 1, b: 1, c: 1},
- indexes: [{a: 1}, {b: 1}]
- }));
- assert.eq(2, coll.aggregate([{$planCacheStats: {}}]).itcount());
- assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
- assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
- entryStats = getSingleEntryStats();
- assert.eq(false, entryStats.indexFilterSet);
-
- // Create an index filter on shape {a: 1, b: 1}, and verify that indexFilterSet is now true.
- assert.commandWorked(testDb.runCommand(
- {planCacheSetFilter: coll.getName(), query: {a: 1, b: 1}, indexes: [{a: 1}, {b: 1}]}));
- assert.eq(2, coll.aggregate([{$planCacheStats: {}}]).itcount());
- assert.eq(0, coll.find({a: 1, b: 1}).itcount());
- assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
- entryStats = getSingleEntryStats();
- assert.eq(true, entryStats.indexFilterSet);
-
- // Should throw an error if $planCacheStats is not first.
- assert.throws(
- () => coll.aggregate([{$match: {createdFromQuery: {a: 1, b: 1}}}, {$planCacheStats: {}}]));
-
- // If the plan cache is cleared, then there are no longer any results returned by
- // $planCacheStats.
- assert.commandWorked(testDb.runCommand({planCacheClear: coll.getName()}));
- assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount());
-
- MongoRunner.stopMongod(conn);
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start up");
+
+const testDb = conn.getDB("test");
+const coll = testDb.plan_cache_stats_agg_source;
+
+// Returns a BSON object representing the plan cache entry for the query shape {a: 1, b: 1}.
+function getSingleEntryStats() {
+ const cursor =
+ coll.aggregate([{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}]);
+ assert(cursor.hasNext());
+ const entryStats = cursor.next();
+ assert(!cursor.hasNext());
+ return entryStats;
+}
+
+// Fails when the collection does not exist.
+assert.commandFailedWithCode(
+ testDb.runCommand({aggregate: coll.getName(), pipeline: [{$planCacheStats: {}}], cursor: {}}),
+ 50933);
+
+// Create a collection with two indices.
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
+
+// Should return an empty result set when there are no cache entries yet.
+assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount());
+
+// Run three distinct query shapes and check that there are three cache entries.
+assert.eq(0, coll.find({a: 1, b: 1}).itcount());
+assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
+assert.eq(0, coll.find({a: 1, b: 1, d: 1}).itcount());
+assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
+
+// We should be able to find particular cache entries by matching on the query from which the
+// entry was created.
+assert.eq(
+ 1,
+ coll.aggregate([{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}])
+ .itcount());
+assert.eq(1,
+ coll.aggregate(
+ [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, c: 1}}}])
+ .itcount());
+assert.eq(1,
+ coll.aggregate(
+ [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, d: 1}}}])
+ .itcount());
+
+// A similar match on a query filter that was never run should turn up nothing.
+assert.eq(0,
+ coll.aggregate(
+ [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1, e: 1}}}])
+ .itcount());
+
+// Test $group over the plan cache metadata.
+assert.eq(1,
+ coll.aggregate([{$planCacheStats: {}}, {$group: {_id: "$createdFromQuery.query.a"}}])
+ .itcount());
+
+// Explain should show that a $match gets absorbed into the $planCacheStats stage.
+let explain = assert.commandWorked(coll.explain().aggregate(
+ [{$planCacheStats: {}}, {$match: {"createdFromQuery.query": {a: 1, b: 1}}}]));
+assert.eq(explain.stages.length, 1);
+const planCacheStatsExplain = getAggPlanStage(explain, "$planCacheStats");
+assert.neq(planCacheStatsExplain, null);
+assert(planCacheStatsExplain.hasOwnProperty("$planCacheStats"));
+assert(planCacheStatsExplain.$planCacheStats.hasOwnProperty("match"));
+assert.eq(planCacheStatsExplain.$planCacheStats.match, {"createdFromQuery.query": {a: 1, b: 1}});
+
+// Get the plan cache metadata for a particular query.
+let entryStats = getSingleEntryStats();
+
+// Verify that the entry has the expected 'createdFromQuery' field.
+assert(entryStats.hasOwnProperty("createdFromQuery"));
+assert.eq(entryStats.createdFromQuery.query, {a: 1, b: 1});
+assert.eq(entryStats.createdFromQuery.sort, {});
+assert.eq(entryStats.createdFromQuery.projection, {});
+assert(!entryStats.createdFromQuery.hasOwnProperty("collation"));
+
+// Verify that $planCacheStats reports the same 'queryHash' and 'planCacheKey' as explain
+// for this query shape.
+explain = assert.commandWorked(coll.find({a: 1, b: 1}).explain());
+assert.eq(entryStats.queryHash, explain.queryPlanner.queryHash);
+assert.eq(entryStats.planCacheKey, explain.queryPlanner.planCacheKey);
+
+// Since the query shape was only run once, the plan cache entry should not be active.
+assert.eq(entryStats.isActive, false);
+
+// Sanity check 'works' value.
+assert(entryStats.hasOwnProperty("works"));
+assert.gt(entryStats.works, 0);
+
+// Check that the cached plan is an index scan either on {a: 1} or {b: 1}.
+assert(entryStats.hasOwnProperty("cachedPlan"));
+const ixscanStage = getPlanStage(entryStats.cachedPlan, "IXSCAN");
+assert.neq(ixscanStage, null);
+assert(bsonWoCompare(ixscanStage.keyPattern, {a: 1}) === 0 ||
+ bsonWoCompare(ixscanStage.keyPattern, {b: 1}) === 0);
+
+// Verify that the 'timeOfCreation' for the entry is now +/- one day.
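+// Note that setDate() returns the adjusted time as milliseconds since the epoch, not a Date.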
+const now = new Date();
+const yesterday = (new Date()).setDate(now.getDate() - 1);
+const tomorrow = (new Date()).setDate(now.getDate() + 1);
+assert(entryStats.hasOwnProperty("timeOfCreation"));
+assert.gt(entryStats.timeOfCreation, yesterday);
+assert.lt(entryStats.timeOfCreation, tomorrow);
+
+// There should be at least two plans in 'creationExecStats', and each should have at least one
+// index scan.
+assert(entryStats.hasOwnProperty("creationExecStats"));
+assert.gte(entryStats.creationExecStats.length, 2);
+for (let plan of entryStats.creationExecStats) {
+ assert(plan.hasOwnProperty("executionStages"));
+ const ixscanStages = getPlanStages(plan.executionStages, "IXSCAN");
+ assert.gt(ixscanStages.length, 0);
+}
+
+// Assert that the entry has an array of at least two scores, and that all scores are greater
+// than 1.
+assert(entryStats.hasOwnProperty("candidatePlanScores"));
+assert.gte(entryStats.candidatePlanScores.length, 2);
+for (let score of entryStats.candidatePlanScores) {
+ assert.gt(score, 1);
+}
+
+// Should report that no index filter is set.
+assert.eq(false, entryStats.indexFilterSet);
+
+// After creating an index filter on a different query shape, $planCacheStats should still
+// report that no index filter is set. Setting a filter clears the cache, so we rerun the query
+// associated with the cache entry.
+assert.commandWorked(testDb.runCommand(
+ {planCacheSetFilter: coll.getName(), query: {a: 1, b: 1, c: 1}, indexes: [{a: 1}, {b: 1}]}));
+assert.eq(2, coll.aggregate([{$planCacheStats: {}}]).itcount());
+assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
+assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
+entryStats = getSingleEntryStats();
+assert.eq(false, entryStats.indexFilterSet);
+
+// Create an index filter on shape {a: 1, b: 1}, and verify that indexFilterSet is now true.
+assert.commandWorked(testDb.runCommand(
+ {planCacheSetFilter: coll.getName(), query: {a: 1, b: 1}, indexes: [{a: 1}, {b: 1}]}));
+assert.eq(2, coll.aggregate([{$planCacheStats: {}}]).itcount());
+assert.eq(0, coll.find({a: 1, b: 1}).itcount());
+assert.eq(3, coll.aggregate([{$planCacheStats: {}}]).itcount());
+entryStats = getSingleEntryStats();
+assert.eq(true, entryStats.indexFilterSet);
+
+// Should throw an error if $planCacheStats is not first.
+assert.throws(
+ () => coll.aggregate([{$match: {createdFromQuery: {a: 1, b: 1}}}, {$planCacheStats: {}}]));
+
+// If the plan cache is cleared, then there are no longer any results returned by
+// $planCacheStats.
+assert.commandWorked(testDb.runCommand({planCacheClear: coll.getName()}));
+assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount());
+
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/port_options.js b/jstests/noPassthrough/port_options.js
index 8f4d4becc3e..72fb5bf755d 100644
--- a/jstests/noPassthrough/port_options.js
+++ b/jstests/noPassthrough/port_options.js
@@ -1,55 +1,55 @@
// Check --port= edge behaviors.
(function() {
- 'use strict';
- jsTest.log("Setting port=0 is okay unless binding to multiple IP interfaces.");
-
- function runTest(bindIP, expectOk) {
- jsTest.log("".concat("Testing with bindIP=[", bindIP, "], expectOk=[", expectOk, "]"));
-
- clearRawMongoProgramOutput();
-
- let pid = startMongoProgramNoConnect(
- "mongod", "--ipv6", "--dbpath", MongoRunner.dataDir, "--bind_ip", bindIP, "--port", 0);
- jsTest.log("".concat("pid=[", pid, "]"));
-
- if (expectOk) {
- let port;
-
- // We use assert.soonNoExcept() here because the mongod may not be logging yet.
- assert.soonNoExcept(() => {
- const logContents = rawMongoProgramOutput();
- const found = logContents.match(/waiting for connections on port (\d+)/);
- if (found !== null) {
- print("Found message from mongod with port it is listening on: " + found[0]);
- port = found[1];
- return true;
- }
- });
-
- const connStr = `127.0.0.1:${port}`;
- print("Attempting to connect to " + connStr);
-
- let conn;
- assert.soonNoExcept(() => {
- conn = new Mongo(connStr);
+'use strict';
+jsTest.log("Setting port=0 is okay unless binding to multiple IP interfaces.");
+
+function runTest(bindIP, expectOk) {
+ jsTest.log("".concat("Testing with bindIP=[", bindIP, "], expectOk=[", expectOk, "]"));
+
+ clearRawMongoProgramOutput();
+
+ let pid = startMongoProgramNoConnect(
+ "mongod", "--ipv6", "--dbpath", MongoRunner.dataDir, "--bind_ip", bindIP, "--port", 0);
+ jsTest.log("".concat("pid=[", pid, "]"));
+
+ if (expectOk) {
+ let port;
+
+ // We use assert.soonNoExcept() here because the mongod may not be logging yet.
+ assert.soonNoExcept(() => {
+ const logContents = rawMongoProgramOutput();
+ const found = logContents.match(/waiting for connections on port (\d+)/);
+ if (found !== null) {
+ print("Found message from mongod with port it is listening on: " + found[0]);
+ port = found[1];
return true;
- });
- assert.commandWorked(conn.adminCommand({ping: 1}));
-
- stopMongoProgramByPid(pid);
- } else {
- const ec = waitProgram(pid);
- assert.eq(ec, MongoRunner.EXIT_NET_ERROR);
- assert.soonNoExcept(() => {
- const logContents = rawMongoProgramOutput();
- const found = logContents.match(
- /Port 0 \(ephemeral port\) is not allowed when listening on multiple IP interfaces/);
- return (found !== null);
- }, "No warning issued for invalid port=0 usage");
- }
+ }
+ });
+
+ const connStr = `127.0.0.1:${port}`;
+ print("Attempting to connect to " + connStr);
+
+ let conn;
+ assert.soonNoExcept(() => {
+ conn = new Mongo(connStr);
+ return true;
+ });
+ assert.commandWorked(conn.adminCommand({ping: 1}));
+
+ stopMongoProgramByPid(pid);
+ } else {
+ const ec = waitProgram(pid);
+ assert.eq(ec, MongoRunner.EXIT_NET_ERROR);
+ assert.soonNoExcept(() => {
+ const logContents = rawMongoProgramOutput();
+ const found = logContents.match(
+ /Port 0 \(ephemeral port\) is not allowed when listening on multiple IP interfaces/);
+ return (found !== null);
+ }, "No warning issued for invalid port=0 usage");
}
+}
- runTest("127.0.0.1", true);
- runTest("127.0.0.1,::1", false);
+runTest("127.0.0.1", true);
+runTest("127.0.0.1,::1", false);
}());
diff --git a/jstests/noPassthrough/predictive_connpool.js b/jstests/noPassthrough/predictive_connpool.js
index c38d01601e2..d92d1ba9a2f 100644
--- a/jstests/noPassthrough/predictive_connpool.js
+++ b/jstests/noPassthrough/predictive_connpool.js
@@ -5,155 +5,155 @@ load("jstests/libs/parallelTester.js");
*/
(function() {
- "use strict";
-
- const st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 2, protocolVersion: 1}});
- const kDbName = 'test';
- const mongosClient = st.s;
- const mongos = mongosClient.getDB(kDbName);
- const rst = st.rs0;
- const primary = rst.getPrimary();
- const secondary = rst.getSecondaries()[0];
-
- const cfg = primary.getDB('local').system.replset.findOne();
- const allHosts = cfg.members.map(x => x.host);
- const primaryOnly = [primary.name];
- const secondaryOnly = [secondary.name];
-
- function configureReplSetFailpoint(name, modeValue) {
- st.rs0.nodes.forEach(function(node) {
- assert.commandWorked(node.getDB("admin").runCommand({
- configureFailPoint: name,
- mode: modeValue,
- data: {shouldCheckForInterrupt: true},
- }));
- });
+"use strict";
+
+const st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 2, protocolVersion: 1}});
+const kDbName = 'test';
+const mongosClient = st.s;
+const mongos = mongosClient.getDB(kDbName);
+const rst = st.rs0;
+const primary = rst.getPrimary();
+const secondary = rst.getSecondaries()[0];
+
+const cfg = primary.getDB('local').system.replset.findOne();
+const allHosts = cfg.members.map(x => x.host);
+const primaryOnly = [primary.name];
+const secondaryOnly = [secondary.name];
+
+function configureReplSetFailpoint(name, modeValue) {
+ st.rs0.nodes.forEach(function(node) {
+ assert.commandWorked(node.getDB("admin").runCommand({
+ configureFailPoint: name,
+ mode: modeValue,
+ data: {shouldCheckForInterrupt: true},
+ }));
+ });
+}
+
+var threads = [];
+
+function launchFinds({times, readPref, shouldFail}) {
+ jsTestLog("Starting " + times + " connections");
+ for (var i = 0; i < times; i++) {
+ var thread = new Thread(function(connStr, readPref, dbName, shouldFail) {
+ var client = new Mongo(connStr);
+ const ret = client.getDB(dbName).runCommand(
+ {find: "test", limit: 1, "$readPreference": {mode: readPref}});
+
+ if (shouldFail) {
+ assert.commandFailed(ret);
+ } else {
+ assert.commandWorked(ret);
+ }
+ }, st.s.host, readPref, kDbName, shouldFail);
+ thread.start();
+ threads.push(thread);
}
-
- var threads = [];
-
- function launchFinds({times, readPref, shouldFail}) {
- jsTestLog("Starting " + times + " connections");
- for (var i = 0; i < times; i++) {
- var thread = new Thread(function(connStr, readPref, dbName, shouldFail) {
- var client = new Mongo(connStr);
- const ret = client.getDB(dbName).runCommand(
- {find: "test", limit: 1, "$readPreference": {mode: readPref}});
-
- if (shouldFail) {
- assert.commandFailed(ret);
- } else {
- assert.commandWorked(ret);
- }
- }, st.s.host, readPref, kDbName, shouldFail);
- thread.start();
- threads.push(thread);
+}
+
+function updateSetParameters(params) {
+ var cmd = Object.assign({"setParameter": 1}, params);
+ assert.commandWorked(mongos.adminCommand(cmd));
+}
+
+function dropConnections() {
+ assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
+}
+
+var currentCheckNum = 0;
+function hasConnPoolStats(args) {
+ const checkNum = currentCheckNum++;
+ jsTestLog("Check #" + checkNum + ": " + tojson(args));
+ var {ready, pending, active, hosts, isAbsent} = args;
+
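+    // Unspecified expected counts default to zero, and 'hosts' defaults to all members of the set.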
+ ready = ready ? ready : 0;
+ pending = pending ? pending : 0;
+ active = active ? active : 0;
+ hosts = hosts ? hosts : allHosts;
+
+ function checkStats(res, host) {
+ var stats = res.hosts[host];
+ if (!stats) {
+ jsTestLog("Connection stats for " + host + " are absent");
+ return isAbsent;
}
- }
- function updateSetParameters(params) {
- var cmd = Object.assign({"setParameter": 1}, params);
- assert.commandWorked(mongos.adminCommand(cmd));
+ jsTestLog("Connection stats for " + host + ": " + tojson(stats));
+ return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
}
- function dropConnections() {
- assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
+ function checkAllStats() {
+ var res = mongos.adminCommand({connPoolStats: 1});
+ return hosts.map(host => checkStats(res, host)).every(x => x);
}
- var currentCheckNum = 0;
- function hasConnPoolStats(args) {
- const checkNum = currentCheckNum++;
- jsTestLog("Check #" + checkNum + ": " + tojson(args));
- var {ready, pending, active, hosts, isAbsent} = args;
-
- ready = ready ? ready : 0;
- pending = pending ? pending : 0;
- active = active ? active : 0;
- hosts = hosts ? hosts : allHosts;
-
- function checkStats(res, host) {
- var stats = res.hosts[host];
- if (!stats) {
- jsTestLog("Connection stats for " + host + " are absent");
- return isAbsent;
- }
-
- jsTestLog("Connection stats for " + host + ": " + tojson(stats));
- return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
- }
-
- function checkAllStats() {
- var res = mongos.adminCommand({connPoolStats: 1});
- return hosts.map(host => checkStats(res, host)).every(x => x);
- }
-
- assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000);
+ assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000);
- jsTestLog("Check #" + checkNum + " successful");
- }
+ jsTestLog("Check #" + checkNum + " successful");
+}
- function checkConnPoolStats() {
- const ret = mongos.runCommand({"connPoolStats": 1});
- const poolStats = ret["pools"]["NetworkInterfaceTL-TaskExecutorPool-0"];
- jsTestLog(poolStats);
- }
+function checkConnPoolStats() {
+ const ret = mongos.runCommand({"connPoolStats": 1});
+ const poolStats = ret["pools"]["NetworkInterfaceTL-TaskExecutorPool-0"];
+ jsTestLog(poolStats);
+}
- function walkThroughBehavior({primaryFollows, secondaryFollows}) {
- // Start pooling with a ping
- mongos.adminCommand({multicast: {ping: 0}});
- checkConnPoolStats();
+function walkThroughBehavior({primaryFollows, secondaryFollows}) {
+ // Start pooling with a ping
+ mongos.adminCommand({multicast: {ping: 0}});
+ checkConnPoolStats();
- // Block connections from finishing
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ // Block connections from finishing
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- // Launch a bunch of primary finds
- launchFinds({times: 10, readPref: "primary"});
+ // Launch a bunch of primary finds
+ launchFinds({times: 10, readPref: "primary"});
- // Confirm we follow
- hasConnPoolStats({active: 10, hosts: primaryOnly});
- if (secondaryFollows) {
- hasConnPoolStats({ready: 10, hosts: secondaryOnly});
- }
- checkConnPoolStats();
+ // Confirm we follow
+ hasConnPoolStats({active: 10, hosts: primaryOnly});
+ if (secondaryFollows) {
+ hasConnPoolStats({ready: 10, hosts: secondaryOnly});
+ }
+ checkConnPoolStats();
- // Launch a bunch of secondary finds
- launchFinds({times: 20, readPref: "secondary"});
+ // Launch a bunch of secondary finds
+ launchFinds({times: 20, readPref: "secondary"});
- // Confirm we follow
- hasConnPoolStats({active: 20, hosts: secondaryOnly});
- if (primaryFollows) {
- hasConnPoolStats({ready: 10, active: 10, hosts: primaryOnly});
- }
- checkConnPoolStats();
+ // Confirm we follow
+ hasConnPoolStats({active: 20, hosts: secondaryOnly});
+ if (primaryFollows) {
+ hasConnPoolStats({ready: 10, active: 10, hosts: primaryOnly});
+ }
+ checkConnPoolStats();
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
- dropConnections();
- }
+ dropConnections();
+}
- assert.writeOK(mongos.test.insert({x: 1}));
- assert.writeOK(mongos.test.insert({x: 2}));
- assert.writeOK(mongos.test.insert({x: 3}));
- st.rs0.awaitReplication();
+assert.writeOK(mongos.test.insert({x: 1}));
+assert.writeOK(mongos.test.insert({x: 2}));
+assert.writeOK(mongos.test.insert({x: 3}));
+st.rs0.awaitReplication();
- jsTestLog("Following disabled");
- walkThroughBehavior({primaryFollows: false, secondaryFollows: false});
+jsTestLog("Following disabled");
+walkThroughBehavior({primaryFollows: false, secondaryFollows: false});
- jsTestLog("Following primary node");
- updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "matchPrimaryNode"});
- walkThroughBehavior({primaryFollows: false, secondaryFollows: true});
+jsTestLog("Following primary node");
+updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "matchPrimaryNode"});
+walkThroughBehavior({primaryFollows: false, secondaryFollows: true});
- // jsTestLog("Following busiest node");
- // updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "matchBusiestNode"});
- // walkThroughBehavior({primaryFollows: true, secondaryFollows: true});
+// jsTestLog("Following busiest node");
+// updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "matchBusiestNode"});
+// walkThroughBehavior({primaryFollows: true, secondaryFollows: true});
- jsTestLog("Reseting to disabled");
- updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "disabled"});
- walkThroughBehavior({primaryFollows: false, secondaryFollows: false});
+jsTestLog("Reseting to disabled");
+updateSetParameters({ShardingTaskExecutorPoolReplicaSetMatching: "disabled"});
+walkThroughBehavior({primaryFollows: false, secondaryFollows: false});
- threads.forEach(function(thread) {
- thread.join();
- });
+threads.forEach(function(thread) {
+ thread.join();
+});
- st.stop();
+st.stop();
})();
diff --git a/jstests/noPassthrough/profile_agg_multiple_batches.js b/jstests/noPassthrough/profile_agg_multiple_batches.js
index 00fb738aca2..6d21e254bde 100644
--- a/jstests/noPassthrough/profile_agg_multiple_batches.js
+++ b/jstests/noPassthrough/profile_agg_multiple_batches.js
@@ -3,33 +3,35 @@
// @tags: [requires_profiling]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/profiler.js");
+load("jstests/libs/profiler.js");
- // Setting internalDocumentSourceCursorBatchSizeBytes=1 ensures that multiple batches pass
- // through DocumentSourceCursor.
- const options = {setParameter: "internalDocumentSourceCursorBatchSizeBytes=1"};
- const conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(options));
+// Setting internalDocumentSourceCursorBatchSizeBytes=1 ensures that multiple batches pass
+// through DocumentSourceCursor.
+const options = {
+ setParameter: "internalDocumentSourceCursorBatchSizeBytes=1"
+};
+const conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(options));
- const testDB = conn.getDB("test");
- const coll = testDB.getCollection("coll");
+const testDB = conn.getDB("test");
+const coll = testDB.getCollection("coll");
- testDB.setProfilingLevel(2);
+testDB.setProfilingLevel(2);
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i, b: i}));
- }
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i, b: i}));
+}
- assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
- assert.eq(8, coll.aggregate([{$match: {a: {$gte: 2}}}, {$group: {_id: "$b"}}]).itcount());
+assert.eq(8, coll.aggregate([{$match: {a: {$gte: 2}}}, {$group: {_id: "$b"}}]).itcount());
- const profileObj = getLatestProfilerEntry(testDB);
+const profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.keysExamined, 8, tojson(profileObj));
- assert.eq(profileObj.docsExamined, 8, tojson(profileObj));
+assert.eq(profileObj.keysExamined, 8, tojson(profileObj));
+assert.eq(profileObj.docsExamined, 8, tojson(profileObj));
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/profile_interrupted_op.js b/jstests/noPassthrough/profile_interrupted_op.js
index 3fa681cee71..f49a126731d 100644
--- a/jstests/noPassthrough/profile_interrupted_op.js
+++ b/jstests/noPassthrough/profile_interrupted_op.js
@@ -6,70 +6,69 @@
// @tags: [requires_persistence, requires_profiling]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js"); // For checkLog.
+load("jstests/libs/check_log.js"); // For checkLog.
- //
- // Start mongo with profiling disabled, create an empty database, and populate it with a
- // collection that has one document.
- //
- let standalone = MongoRunner.runMongod({profile: "0"});
+//
+// Start mongo with profiling disabled, create an empty database, and populate it with a
+// collection that has one document.
+//
+let standalone = MongoRunner.runMongod({profile: "0"});
- let db = standalone.getDB("profile_interrupted_op");
- assert.commandWorked(db.dropDatabase());
+let db = standalone.getDB("profile_interrupted_op");
+assert.commandWorked(db.dropDatabase());
- let coll = db.getCollection("test");
- assert.commandWorked(coll.insert({a: 1}));
+let coll = db.getCollection("test");
+assert.commandWorked(coll.insert({a: 1}));
- //
- // Stop the mongod and then restart it, this time with profiling enabled. Note that enabling
- // profiling on a running database would create the 'system.profile' collection, which we don't
- // yet want created for this test.
- //
- MongoRunner.stopMongod(standalone);
- standalone = MongoRunner.runMongod(
- {restart: true, cleanData: false, dbpath: standalone.dbpath, profile: "2"});
+//
+// Stop the mongod and then restart it, this time with profiling enabled. Note that enabling
+// profiling on a running database would create the 'system.profile' collection, which we don't
+// yet want created for this test.
+//
+MongoRunner.stopMongod(standalone);
+standalone = MongoRunner.runMongod(
+ {restart: true, cleanData: false, dbpath: standalone.dbpath, profile: "2"});
- //
- // Execute a query that will get interrupted for exceeding its 'maxTimeMS' value. The profiler
- // will attempt to create the 'system.profile' collection while the operation context is already
- // marked as interrupted.
- //
- db = standalone.getDB("profile_interrupted_op");
- coll = db.getCollection("test");
- const err = assert.throws(function() {
- coll.find({
- $where: function() {
- sleep(3600);
- return true;
- }
- })
- .maxTimeMS(1000)
- .count();
- });
- assert.contains(err.code,
- [ErrorCodes.MaxTimeMSExpired, ErrorCodes.Interrupted, ErrorCodes.InternalError],
- err);
+//
+// Execute a query that will get interrupted for exceeding its 'maxTimeMS' value. The profiler
+// will attempt to create the 'system.profile' collection while the operation context is already
+// marked as interrupted.
+//
+db = standalone.getDB("profile_interrupted_op");
+coll = db.getCollection("test");
+const err = assert.throws(function() {
+ coll.find({
+ $where: function() {
+ sleep(3600);
+ return true;
+ }
+ })
+ .maxTimeMS(1000)
+ .count();
+});
+assert.contains(
+ err.code, [ErrorCodes.MaxTimeMSExpired, ErrorCodes.Interrupted, ErrorCodes.InternalError], err);
- //
- // Profiling is not necessary for the rest of the test. We turn it off to make sure it doesn't
- // interfere with any remaining commands.
- //
- db.setProfilingLevel(0);
+//
+// Profiling is not necessary for the rest of the test. We turn it off to make sure it doesn't
+// interfere with any remaining commands.
+//
+db.setProfilingLevel(0);
- //
- // The mongod should print out a warning to indicate the potential need for a manually created
- // 'system.profile' collection.
- //
- checkLog.contains(standalone, "Manually create profile collection");
+//
+// The mongod should print out a warning to indicate the potential need for a manually created
+// 'system.profile' collection.
+//
+checkLog.contains(standalone, "Manually create profile collection");
- //
- // The mongod should not create the 'system.profile' collection automatically.
- //
- const res = db.runCommand({listCollections: 1, filter: {name: "system.profile"}});
- assert.commandWorked(res);
- assert.eq(res.cursor.firstBatch, [], res);
+//
+// The mongod should not create the 'system.profile' collection automatically.
+//
+const res = db.runCommand({listCollections: 1, filter: {name: "system.profile"}});
+assert.commandWorked(res);
+assert.eq(res.cursor.firstBatch, [], res);
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
})();
diff --git a/jstests/noPassthrough/query_knobs_validation.js b/jstests/noPassthrough/query_knobs_validation.js
index 536f4d6f995..9bec3018c1d 100644
--- a/jstests/noPassthrough/query_knobs_validation.js
+++ b/jstests/noPassthrough/query_knobs_validation.js
@@ -6,166 +6,165 @@
*/
(function() {
- "use strict";
-
- const conn = MongoRunner.runMongod();
- const testDB = conn.getDB("admin");
- const expectedParamDefaults = {
- internalQueryPlanEvaluationWorks: 10000,
- internalQueryPlanEvaluationCollFraction: 0.3,
- internalQueryPlanEvaluationMaxResults: 101,
- internalQueryCacheSize: 5000,
- internalQueryCacheFeedbacksStored: 20,
- internalQueryCacheEvictionRatio: 10.0,
- internalQueryCacheWorksGrowthCoefficient: 2.0,
- internalQueryCacheDisableInactiveEntries: false,
- internalQueryCacheListPlansNewOutput: false,
- internalQueryPlannerMaxIndexedSolutions: 64,
- internalQueryEnumerationMaxOrSolutions: 10,
- internalQueryEnumerationMaxIntersectPerAnd: 3,
- internalQueryForceIntersectionPlans: false,
- internalQueryPlannerEnableIndexIntersection: true,
- internalQueryPlannerEnableHashIntersection: false,
- internalQueryPlanOrChildrenIndependently: true,
- internalQueryMaxScansToExplode: 200,
- internalQueryExecMaxBlockingSortBytes: 32 * 1024 * 1024,
- internalQueryExecYieldIterations: 128,
- internalQueryExecYieldPeriodMS: 10,
- internalQueryFacetBufferSizeBytes: 100 * 1024 * 1024,
- internalDocumentSourceCursorBatchSizeBytes: 4 * 1024 * 1024,
- internalDocumentSourceLookupCacheSizeBytes: 100 * 1024 * 1024,
- internalDocumentSourceSortMaxBlockingSortBytes: 100 * 1024 * 1024,
- internalLookupStageIntermediateDocumentMaxSizeBytes: 100 * 1024 * 1024,
- internalDocumentSourceGroupMaxMemoryBytes: 100 * 1024 * 1024,
- // Should be half the value of 'internalQueryExecYieldIterations' parameter.
- internalInsertMaxBatchSize: 64,
- internalQueryPlannerGenerateCoveredWholeIndexScans: false,
- internalQueryIgnoreUnknownJSONSchemaKeywords: false,
- internalQueryProhibitBlockingMergeOnMongoS: false,
- };
-
- function assertDefaultParameterValues() {
- // For each parameter in 'expectedParamDefaults' verify that the value returned by
- // 'getParameter' is same as the expected value.
- for (let paramName in expectedParamDefaults) {
- const expectedParamValue = expectedParamDefaults[paramName];
- const getParamRes =
- assert.commandWorked(testDB.adminCommand({getParameter: 1, [paramName]: 1}));
- assert.eq(getParamRes[paramName], expectedParamValue);
- }
- }
-
- function assertSetParameterSucceeds(paramName, value) {
- assert.commandWorked(testDB.adminCommand({setParameter: 1, [paramName]: value}));
- // Verify that the set parameter actually worked by doing a get and verifying the value.
+"use strict";
+
+const conn = MongoRunner.runMongod();
+const testDB = conn.getDB("admin");
+const expectedParamDefaults = {
+ internalQueryPlanEvaluationWorks: 10000,
+ internalQueryPlanEvaluationCollFraction: 0.3,
+ internalQueryPlanEvaluationMaxResults: 101,
+ internalQueryCacheSize: 5000,
+ internalQueryCacheFeedbacksStored: 20,
+ internalQueryCacheEvictionRatio: 10.0,
+ internalQueryCacheWorksGrowthCoefficient: 2.0,
+ internalQueryCacheDisableInactiveEntries: false,
+ internalQueryCacheListPlansNewOutput: false,
+ internalQueryPlannerMaxIndexedSolutions: 64,
+ internalQueryEnumerationMaxOrSolutions: 10,
+ internalQueryEnumerationMaxIntersectPerAnd: 3,
+ internalQueryForceIntersectionPlans: false,
+ internalQueryPlannerEnableIndexIntersection: true,
+ internalQueryPlannerEnableHashIntersection: false,
+ internalQueryPlanOrChildrenIndependently: true,
+ internalQueryMaxScansToExplode: 200,
+ internalQueryExecMaxBlockingSortBytes: 32 * 1024 * 1024,
+ internalQueryExecYieldIterations: 128,
+ internalQueryExecYieldPeriodMS: 10,
+ internalQueryFacetBufferSizeBytes: 100 * 1024 * 1024,
+ internalDocumentSourceCursorBatchSizeBytes: 4 * 1024 * 1024,
+ internalDocumentSourceLookupCacheSizeBytes: 100 * 1024 * 1024,
+ internalDocumentSourceSortMaxBlockingSortBytes: 100 * 1024 * 1024,
+ internalLookupStageIntermediateDocumentMaxSizeBytes: 100 * 1024 * 1024,
+ internalDocumentSourceGroupMaxMemoryBytes: 100 * 1024 * 1024,
+ // Should be half the value of 'internalQueryExecYieldIterations' parameter.
+ internalInsertMaxBatchSize: 64,
+ internalQueryPlannerGenerateCoveredWholeIndexScans: false,
+ internalQueryIgnoreUnknownJSONSchemaKeywords: false,
+ internalQueryProhibitBlockingMergeOnMongoS: false,
+};
+
+function assertDefaultParameterValues() {
+    // For each parameter in 'expectedParamDefaults', verify that the value returned by
+    // 'getParameter' is the same as the expected value.
+ for (let paramName in expectedParamDefaults) {
+ const expectedParamValue = expectedParamDefaults[paramName];
const getParamRes =
assert.commandWorked(testDB.adminCommand({getParameter: 1, [paramName]: 1}));
- assert.eq(getParamRes[paramName], value);
- }
-
- function assertSetParameterFails(paramName, value) {
- assert.commandFailedWithCode(testDB.adminCommand({setParameter: 1, [paramName]: value}),
- ErrorCodes.BadValue);
+ assert.eq(getParamRes[paramName], expectedParamValue);
}
+}
+
+function assertSetParameterSucceeds(paramName, value) {
+ assert.commandWorked(testDB.adminCommand({setParameter: 1, [paramName]: value}));
+ // Verify that the set parameter actually worked by doing a get and verifying the value.
+ const getParamRes =
+ assert.commandWorked(testDB.adminCommand({getParameter: 1, [paramName]: 1}));
+ assert.eq(getParamRes[paramName], value);
+}
+
+function assertSetParameterFails(paramName, value) {
+ assert.commandFailedWithCode(testDB.adminCommand({setParameter: 1, [paramName]: value}),
+ ErrorCodes.BadValue);
+}
+
+// Verify that the default values are set as expected when the server starts up.
+assertDefaultParameterValues();
+
+assertSetParameterSucceeds("internalQueryPlanEvaluationWorks", 11);
+assertSetParameterFails("internalQueryPlanEvaluationWorks", 0);
+assertSetParameterFails("internalQueryPlanEvaluationWorks", -1);
+
+assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 0.0);
+assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 0.444);
+assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 1.0);
+assertSetParameterFails("internalQueryPlanEvaluationCollFraction", -0.1);
+assertSetParameterFails("internalQueryPlanEvaluationCollFraction", 1.0001);
+
+assertSetParameterSucceeds("internalQueryPlanEvaluationMaxResults", 11);
+assertSetParameterSucceeds("internalQueryPlanEvaluationMaxResults", 0);
+assertSetParameterFails("internalQueryPlanEvaluationMaxResults", -1);
+
+assertSetParameterSucceeds("internalQueryCacheSize", 1);
+assertSetParameterSucceeds("internalQueryCacheSize", 0);
+assertSetParameterFails("internalQueryCacheSize", -1);
+
+assertSetParameterSucceeds("internalQueryCacheFeedbacksStored", 1);
+assertSetParameterSucceeds("internalQueryCacheFeedbacksStored", 0);
+assertSetParameterFails("internalQueryCacheFeedbacksStored", -1);
+
+assertSetParameterSucceeds("internalQueryCacheEvictionRatio", 1.0);
+assertSetParameterSucceeds("internalQueryCacheEvictionRatio", 0.0);
+assertSetParameterFails("internalQueryCacheEvictionRatio", -0.1);
+
+assertSetParameterSucceeds("internalQueryCacheWorksGrowthCoefficient", 1.1);
+assertSetParameterFails("internalQueryCacheWorksGrowthCoefficient", 1.0);
+assertSetParameterFails("internalQueryCacheWorksGrowthCoefficient", 0.1);
+
+assertSetParameterSucceeds("internalQueryPlannerMaxIndexedSolutions", 11);
+assertSetParameterSucceeds("internalQueryPlannerMaxIndexedSolutions", 0);
+assertSetParameterFails("internalQueryPlannerMaxIndexedSolutions", -1);
+
+assertSetParameterSucceeds("internalQueryEnumerationMaxOrSolutions", 11);
+assertSetParameterSucceeds("internalQueryEnumerationMaxOrSolutions", 0);
+assertSetParameterFails("internalQueryEnumerationMaxOrSolutions", -1);
+
+assertSetParameterSucceeds("internalQueryEnumerationMaxIntersectPerAnd", 11);
+assertSetParameterSucceeds("internalQueryEnumerationMaxIntersectPerAnd", 0);
+assertSetParameterFails("internalQueryEnumerationMaxIntersectPerAnd", -1);
+
+assertSetParameterSucceeds("internalQueryMaxScansToExplode", 11);
+assertSetParameterSucceeds("internalQueryMaxScansToExplode", 0);
+assertSetParameterFails("internalQueryMaxScansToExplode", -1);
+
+assertSetParameterSucceeds("internalQueryExecMaxBlockingSortBytes", 11);
+assertSetParameterSucceeds("internalQueryExecMaxBlockingSortBytes", 0);
+assertSetParameterFails("internalQueryExecMaxBlockingSortBytes", -1);
+
+assertSetParameterSucceeds("internalQueryExecYieldIterations", 10);
+assertSetParameterSucceeds("internalQueryExecYieldIterations", 0);
+assertSetParameterSucceeds("internalQueryExecYieldIterations", -1);
+
+assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 1);
+assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 0);
+assertSetParameterFails("internalQueryExecYieldPeriodMS", -1);
+
+assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 11);
+assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 0);
+assertSetParameterFails("internalQueryExecYieldPeriodMS", -1);
+
+assertSetParameterSucceeds("internalQueryFacetBufferSizeBytes", 1);
+assertSetParameterFails("internalQueryFacetBufferSizeBytes", 0);
+assertSetParameterFails("internalQueryFacetBufferSizeBytes", -1);
+
+assertSetParameterSucceeds("internalDocumentSourceSortMaxBlockingSortBytes", 11);
+assertSetParameterFails("internalDocumentSourceSortMaxBlockingSortBytes", 0);
+assertSetParameterFails("internalDocumentSourceSortMaxBlockingSortBytes", -1);
+
+assertSetParameterSucceeds("internalDocumentSourceGroupMaxMemoryBytes", 11);
+assertSetParameterFails("internalDocumentSourceGroupMaxMemoryBytes", 0);
+assertSetParameterFails("internalDocumentSourceGroupMaxMemoryBytes", -1);
+
+// Internal BSON max object size is slightly larger than the max user object size, to
+// accommodate command metadata.
+const bsonUserSizeLimit = assert.commandWorked(testDB.isMaster()).maxBsonObjectSize;
+const bsonObjMaxInternalSize = bsonUserSizeLimit + 16 * 1024;
+
+assertSetParameterFails("internalLookupStageIntermediateDocumentMaxSizeBytes", 1);
+assertSetParameterSucceeds("internalLookupStageIntermediateDocumentMaxSizeBytes",
+ bsonObjMaxInternalSize);
+
+assertSetParameterSucceeds("internalInsertMaxBatchSize", 11);
+assertSetParameterFails("internalInsertMaxBatchSize", 0);
+assertSetParameterFails("internalInsertMaxBatchSize", -1);
+
+assertSetParameterSucceeds("internalDocumentSourceCursorBatchSizeBytes", 11);
+assertSetParameterSucceeds("internalDocumentSourceCursorBatchSizeBytes", 0);
+assertSetParameterFails("internalDocumentSourceCursorBatchSizeBytes", -1);
- // Verify that the default values are set as expected when the server starts up.
- assertDefaultParameterValues();
-
- assertSetParameterSucceeds("internalQueryPlanEvaluationWorks", 11);
- assertSetParameterFails("internalQueryPlanEvaluationWorks", 0);
- assertSetParameterFails("internalQueryPlanEvaluationWorks", -1);
-
- assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 0.0);
- assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 0.444);
- assertSetParameterSucceeds("internalQueryPlanEvaluationCollFraction", 1.0);
- assertSetParameterFails("internalQueryPlanEvaluationCollFraction", -0.1);
- assertSetParameterFails("internalQueryPlanEvaluationCollFraction", 1.0001);
-
- assertSetParameterSucceeds("internalQueryPlanEvaluationMaxResults", 11);
- assertSetParameterSucceeds("internalQueryPlanEvaluationMaxResults", 0);
- assertSetParameterFails("internalQueryPlanEvaluationMaxResults", -1);
-
- assertSetParameterSucceeds("internalQueryCacheSize", 1);
- assertSetParameterSucceeds("internalQueryCacheSize", 0);
- assertSetParameterFails("internalQueryCacheSize", -1);
-
- assertSetParameterSucceeds("internalQueryCacheFeedbacksStored", 1);
- assertSetParameterSucceeds("internalQueryCacheFeedbacksStored", 0);
- assertSetParameterFails("internalQueryCacheFeedbacksStored", -1);
-
- assertSetParameterSucceeds("internalQueryCacheEvictionRatio", 1.0);
- assertSetParameterSucceeds("internalQueryCacheEvictionRatio", 0.0);
- assertSetParameterFails("internalQueryCacheEvictionRatio", -0.1);
-
- assertSetParameterSucceeds("internalQueryCacheWorksGrowthCoefficient", 1.1);
- assertSetParameterFails("internalQueryCacheWorksGrowthCoefficient", 1.0);
- assertSetParameterFails("internalQueryCacheWorksGrowthCoefficient", 0.1);
-
- assertSetParameterSucceeds("internalQueryPlannerMaxIndexedSolutions", 11);
- assertSetParameterSucceeds("internalQueryPlannerMaxIndexedSolutions", 0);
- assertSetParameterFails("internalQueryPlannerMaxIndexedSolutions", -1);
-
- assertSetParameterSucceeds("internalQueryEnumerationMaxOrSolutions", 11);
- assertSetParameterSucceeds("internalQueryEnumerationMaxOrSolutions", 0);
- assertSetParameterFails("internalQueryEnumerationMaxOrSolutions", -1);
-
- assertSetParameterSucceeds("internalQueryEnumerationMaxIntersectPerAnd", 11);
- assertSetParameterSucceeds("internalQueryEnumerationMaxIntersectPerAnd", 0);
- assertSetParameterFails("internalQueryEnumerationMaxIntersectPerAnd", -1);
-
- assertSetParameterSucceeds("internalQueryMaxScansToExplode", 11);
- assertSetParameterSucceeds("internalQueryMaxScansToExplode", 0);
- assertSetParameterFails("internalQueryMaxScansToExplode", -1);
-
- assertSetParameterSucceeds("internalQueryExecMaxBlockingSortBytes", 11);
- assertSetParameterSucceeds("internalQueryExecMaxBlockingSortBytes", 0);
- assertSetParameterFails("internalQueryExecMaxBlockingSortBytes", -1);
-
- assertSetParameterSucceeds("internalQueryExecYieldIterations", 10);
- assertSetParameterSucceeds("internalQueryExecYieldIterations", 0);
- assertSetParameterSucceeds("internalQueryExecYieldIterations", -1);
-
- assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 1);
- assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 0);
- assertSetParameterFails("internalQueryExecYieldPeriodMS", -1);
-
- assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 11);
- assertSetParameterSucceeds("internalQueryExecYieldPeriodMS", 0);
- assertSetParameterFails("internalQueryExecYieldPeriodMS", -1);
-
- assertSetParameterSucceeds("internalQueryFacetBufferSizeBytes", 1);
- assertSetParameterFails("internalQueryFacetBufferSizeBytes", 0);
- assertSetParameterFails("internalQueryFacetBufferSizeBytes", -1);
-
- assertSetParameterSucceeds("internalDocumentSourceSortMaxBlockingSortBytes", 11);
- assertSetParameterFails("internalDocumentSourceSortMaxBlockingSortBytes", 0);
- assertSetParameterFails("internalDocumentSourceSortMaxBlockingSortBytes", -1);
-
- assertSetParameterSucceeds("internalDocumentSourceGroupMaxMemoryBytes", 11);
- assertSetParameterFails("internalDocumentSourceGroupMaxMemoryBytes", 0);
- assertSetParameterFails("internalDocumentSourceGroupMaxMemoryBytes", -1);
-
- // Internal BSON max object size is slightly larger than the max user object size, to
- // accommodate command metadata.
- const bsonUserSizeLimit = assert.commandWorked(testDB.isMaster()).maxBsonObjectSize;
- const bsonObjMaxInternalSize = bsonUserSizeLimit + 16 * 1024;
-
- assertSetParameterFails("internalLookupStageIntermediateDocumentMaxSizeBytes", 1);
- assertSetParameterSucceeds("internalLookupStageIntermediateDocumentMaxSizeBytes",
- bsonObjMaxInternalSize);
-
- assertSetParameterSucceeds("internalInsertMaxBatchSize", 11);
- assertSetParameterFails("internalInsertMaxBatchSize", 0);
- assertSetParameterFails("internalInsertMaxBatchSize", -1);
-
- assertSetParameterSucceeds("internalDocumentSourceCursorBatchSizeBytes", 11);
- assertSetParameterSucceeds("internalDocumentSourceCursorBatchSizeBytes", 0);
- assertSetParameterFails("internalDocumentSourceCursorBatchSizeBytes", -1);
-
- assertSetParameterSucceeds("internalDocumentSourceLookupCacheSizeBytes", 11);
- assertSetParameterSucceeds("internalDocumentSourceLookupCacheSizeBytes", 0);
- assertSetParameterFails("internalDocumentSourceLookupCacheSizeBytes", -1);
-
- MongoRunner.stopMongod(conn);
+assertSetParameterSucceeds("internalDocumentSourceLookupCacheSizeBytes", 11);
+assertSetParameterSucceeds("internalDocumentSourceLookupCacheSizeBytes", 0);
+assertSetParameterFails("internalDocumentSourceLookupCacheSizeBytes", -1);
+MongoRunner.stopMongod(conn);
})();
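
Each assertSetParameter* helper above reduces to the same round trip against the admin database: a setParameter call followed by a getParameter read-back. A minimal standalone sketch of that round trip (the knob name and the value 256 are chosen purely for illustration):

    const conn = MongoRunner.runMongod();
    const admin = conn.getDB("admin");
    // Set a query knob, then read it back to confirm the server kept the value.
    assert.commandWorked(admin.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 256}));
    const res = assert.commandWorked(
        admin.adminCommand({getParameter: 1, internalQueryExecYieldIterations: 1}));
    assert.eq(res.internalQueryExecYieldIterations, 256);
    MongoRunner.stopMongod(conn);
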
diff --git a/jstests/noPassthrough/query_yield1.js b/jstests/noPassthrough/query_yield1.js
index 4effd6b370f..74485c45cb0 100644
--- a/jstests/noPassthrough/query_yield1.js
+++ b/jstests/noPassthrough/query_yield1.js
@@ -1,93 +1,93 @@
(function() {
- "use strict";
- if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
- const conn = MongoRunner.runMongod({nojournal: ""});
- assert.neq(null, conn, "mongod failed to start.");
- db = conn.getDB("test");
-
- t = db.query_yield1;
- t.drop();
-
- N = 20000;
- i = 0;
-
- q = function() {
- var x = this.n;
- for (var i = 0; i < 250; i++) {
- x = x * 2;
+"use strict";
+if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
+ const conn = MongoRunner.runMongod({nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ db = conn.getDB("test");
+
+ t = db.query_yield1;
+ t.drop();
+
+ N = 20000;
+ i = 0;
+
+ q = function() {
+ var x = this.n;
+ for (var i = 0; i < 250; i++) {
+ x = x * 2;
+ }
+ return false;
+ };
+
+ while (true) {
+ fill = function() {
+ var bulk = t.initializeUnorderedBulkOp();
+ for (; i < N; i++) {
+ bulk.insert({_id: i, n: 1});
}
- return false;
+ assert.writeOK(bulk.execute());
};
- while (true) {
- fill = function() {
- var bulk = t.initializeUnorderedBulkOp();
- for (; i < N; i++) {
- bulk.insert({_id: i, n: 1});
- }
- assert.writeOK(bulk.execute());
- };
-
- timeQuery = function() {
- return Date.timeFunc(function() {
- assert.eq(0, t.find(q).itcount());
- });
- };
-
- fill();
- timeQuery();
- timeQuery();
- time = timeQuery();
- print(N + "\t" + time);
- if (time > 2000)
- break;
-
- N *= 2;
- }
+ timeQuery = function() {
+ return Date.timeFunc(function() {
+ assert.eq(0, t.find(q).itcount());
+ });
+ };
- // --- test 1
+ fill();
+ timeQuery();
+ timeQuery();
+ time = timeQuery();
+ print(N + "\t" + time);
+ if (time > 2000)
+ break;
- assert.eq(0, db.currentOp().inprog.length, "setup broken");
+ N *= 2;
+ }
- join = startParallelShell(
- "print( 0 == db.query_yield1.find( function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
+ // --- test 1
- assert.soon(function() {
- var x = db.currentOp().inprog;
- return x.length > 0;
- }, "never doing query", 2000, 1);
+ assert.eq(0, db.currentOp().inprog.length, "setup broken");
- print("start query");
+ join = startParallelShell(
+ "print( 0 == db.query_yield1.find( function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
- num = 0;
- start = new Date();
- biggestMe = 0;
- while (((new Date()).getTime() - start) < (time * 2)) {
- var me = Date.timeFunc(function() {
- t.insert({x: 1});
- });
- var x = db.currentOp();
+ assert.soon(function() {
+ var x = db.currentOp().inprog;
+ return x.length > 0;
+ }, "never doing query", 2000, 1);
- if (num++ == 0) {
- assert.eq(1, x.inprog.length, "nothing in prog");
- }
+ print("start query");
- if (me > biggestMe) {
- biggestMe = me;
- print("biggestMe: " + biggestMe);
- }
-
- assert.gt(200, me, "took too long for me to run");
+ num = 0;
+ start = new Date();
+ biggestMe = 0;
+ while (((new Date()).getTime() - start) < (time * 2)) {
+ var me = Date.timeFunc(function() {
+ t.insert({x: 1});
+ });
+ var x = db.currentOp();
- if (x.inprog.length == 0)
- break;
+ if (num++ == 0) {
+ assert.eq(1, x.inprog.length, "nothing in prog");
}
- join();
+ if (me > biggestMe) {
+ biggestMe = me;
+ print("biggestMe: " + biggestMe);
+ }
- var x = db.currentOp();
- assert.eq(0, x.inprog.length, "weird 2");
+ assert.gt(200, me, "took too long for me to run");
- MongoRunner.stopMongod(conn);
+ if (x.inprog.length == 0)
+ break;
}
+
+ join();
+
+ var x = db.currentOp();
+ assert.eq(0, x.inprog.length, "weird 2");
+
+ MongoRunner.stopMongod(conn);
+}
})();
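
The (currently disabled) test above is a write-starvation probe: a parallel shell runs a deliberately slow $where query while the main shell times single-document inserts and asserts that none of them stalls behind it. A rough sketch of that skeleton, assuming the shell is already connected to a mongod so that db is set (collection name, loop count, and the 200 ms threshold are illustrative):

    const coll = db.getSiblingDB("test").query_yield1;
    const join = startParallelShell(
        "db.getSiblingDB('test').query_yield1.find({$where: 'var x = 1; for (var i = 0; i < 50000; i++) { x *= 2; } return false;'}).itcount();");
    for (let n = 0; n < 100; n++) {
        const ms = Date.timeFunc(function() {
            coll.insert({x: 1});
        });
        // If the reader never yields, these inserts stall behind the slow query.
        assert.gt(200, ms, "insert took " + ms + " ms while the $where query was running");
    }
    join();  // blocks until the parallel shell exits
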
diff --git a/jstests/noPassthrough/query_yield2.js b/jstests/noPassthrough/query_yield2.js
index e5257653fd1..46816da5aea 100644
--- a/jstests/noPassthrough/query_yield2.js
+++ b/jstests/noPassthrough/query_yield2.js
@@ -1,153 +1,153 @@
(function() {
- "use strict";
- if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
+"use strict";
+if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
- var currentOp;
- var N;
- var i;
- var t;
- var q;
- var len;
- var num;
- var start;
- var insertTime;
+ var currentOp;
+ var N;
+ var i;
+ var t;
+ var q;
+ var len;
+ var num;
+ var start;
+ var insertTime;
- const conn = MongoRunner.runMongod({nojournal: ""});
- assert.neq(null, conn, "mongod failed to start.");
- db = conn.getDB("test");
+ const conn = MongoRunner.runMongod({nojournal: ""});
+ assert.neq(null, conn, "mongod failed to start.");
+ db = conn.getDB("test");
- t = db.query_yield2;
- t.drop();
+ t = db.query_yield2;
+ t.drop();
- N = 200;
- i = 0;
+ N = 200;
+ i = 0;
- q = function() {
- var x = this.n;
- for (var i = 0; i < 25000; i++) {
- x = x * 2;
- }
- return false;
- };
+ q = function() {
+ var x = this.n;
+ for (var i = 0; i < 25000; i++) {
+ x = x * 2;
+ }
+ return false;
+ };
- print("Shell ==== Creating test.query_yield2 collection ...");
- print(
- "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete");
- while (true) {
- fill = function() {
- var bulk = t.initializeUnorderedBulkOp();
- for (; i < N; ++i) {
- bulk.insert({_id: i, n: 1});
- }
- assert.writeOK(bulk.execute());
- };
- timeQuery = function() {
- return Date.timeFunc(function() {
- assert.eq(0, t.find(q).itcount());
- });
- };
- print("Shell ==== Adding document IDs from " + i + " to " + (N - 1));
- fill();
- print("Shell ==== Running warm-up query 1");
- timeQuery();
- print("Shell ==== Running warm-up query 2");
- timeQuery();
- print("Shell ==== Running timed query ...");
- time = timeQuery();
- print("Shell ==== Query across " + N + " documents took " + time + " ms");
- if (time > 2000) {
- print("Shell ==== Reached desired 2000 ms mark (at " + time +
-                      " ms), proceeding to next step");
- break;
+ print("Shell ==== Creating test.query_yield2 collection ...");
+ print(
+ "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete");
+ while (true) {
+ fill = function() {
+ var bulk = t.initializeUnorderedBulkOp();
+ for (; i < N; ++i) {
+ bulk.insert({_id: i, n: 1});
}
- N *= 2;
- print("Shell ==== Did not reach 2000 ms, increasing fill point to " + N + " documents");
+ assert.writeOK(bulk.execute());
+ };
+ timeQuery = function() {
+ return Date.timeFunc(function() {
+ assert.eq(0, t.find(q).itcount());
+ });
+ };
+ print("Shell ==== Adding document IDs from " + i + " to " + (N - 1));
+ fill();
+ print("Shell ==== Running warm-up query 1");
+ timeQuery();
+ print("Shell ==== Running warm-up query 2");
+ timeQuery();
+ print("Shell ==== Running timed query ...");
+ time = timeQuery();
+ print("Shell ==== Query across " + N + " documents took " + time + " ms");
+ if (time > 2000) {
+ print("Shell ==== Reached desired 2000 ms mark (at " + time +
+                  " ms), proceeding to next step");
+ break;
}
+ N *= 2;
+ print("Shell ==== Did not reach 2000 ms, increasing fill point to " + N + " documents");
+ }
- print("Shell ==== Testing db.currentOp to make sure nothing is in progress");
- print("Shell ==== Dump of db.currentOp:");
+ print("Shell ==== Testing db.currentOp to make sure nothing is in progress");
+ print("Shell ==== Dump of db.currentOp:");
+ currentOp = db.currentOp();
+ print(tojson(currentOp));
+ len = currentOp.inprog.length;
+ if (len) {
+ print("Shell ==== This test is broken: db.currentOp().inprog.length is " + len);
+ throw Error("query_yield2.js test is broken");
+ }
+ print("Shell ==== The test is working so far: db.currentOp().inprog.length is " + len);
+
+ print("Shell ==== Starting parallel shell to test if slow query will yield to write");
+ join = startParallelShell(
+ "print( 0 == db.query_yield2.find( function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
+
+ print("Shell ==== Waiting until db.currentOp().inprog becomes non-empty");
+ assert.soon(function() {
currentOp = db.currentOp();
- print(tojson(currentOp));
len = currentOp.inprog.length;
if (len) {
- print("Shell ==== This test is broken: db.currentOp().inprog.length is " + len);
- throw Error("query_yield2.js test is broken");
+ print("Shell ==== Wait satisfied: db.currentOp().inprog.length is " + len);
+ print("Shell ==== Dump of db.currentOp:");
+ print(tojson(currentOp));
+ print("Shell ==== Checking if this currentOp is the query we are waiting for");
+ if (currentOp.inprog[0].ns == "test.query_yield2" &&
+ currentOp.inprog[0].query["$where"]) {
+ print("Shell ==== Yes, we found the query we are waiting for");
+ return true;
+ }
+ if (currentOp.inprog[0].ns == "" && currentOp.inprog[0].query["whatsmyuri"]) {
+ print("Shell ==== No, we found a \"whatsmyuri\" query, waiting some more");
+ return false;
+ }
+ print(
+ "Shell ==== No, we found something other than our query or a \"whatsmyuri\", waiting some more");
+ return false;
}
- print("Shell ==== The test is working so far: db.currentOp().inprog.length is " + len);
+ return len > 0;
+ }, "Wait failed, db.currentOp().inprog never became non-empty", 2000, 1);
- print("Shell ==== Starting parallel shell to test if slow query will yield to write");
- join = startParallelShell(
- "print( 0 == db.query_yield2.find( function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
-
- print("Shell ==== Waiting until db.currentOp().inprog becomes non-empty");
- assert.soon(function() {
- currentOp = db.currentOp();
- len = currentOp.inprog.length;
- if (len) {
- print("Shell ==== Wait satisfied: db.currentOp().inprog.length is " + len);
+ print(
+ "Shell ==== Now that we have seen db.currentOp().inprog show that our query is running, we start the real test");
+ num = 0;
+ start = new Date();
+ while (((new Date()).getTime() - start) < (time * 2)) {
+ if (num == 0) {
+ print("Shell ==== Starting loop " + num + ", inserting 1 document");
+ }
+ insertTime = Date.timeFunc(function() {
+ t.insert({x: 1});
+ });
+ currentOp = db.currentOp();
+ len = currentOp.inprog.length;
+ print("Shell ==== Time to insert document " + num + " was " + insertTime +
+ " ms, db.currentOp().inprog.length is " + len);
+ if (num++ == 0) {
+ if (len != 1) {
+ print("Shell ==== TEST FAILED! db.currentOp().inprog.length is " + len);
print("Shell ==== Dump of db.currentOp:");
print(tojson(currentOp));
- print("Shell ==== Checking if this currentOp is the query we are waiting for");
- if (currentOp.inprog[0].ns == "test.query_yield2" &&
- currentOp.inprog[0].query["$where"]) {
- print("Shell ==== Yes, we found the query we are waiting for");
- return true;
- }
- if (currentOp.inprog[0].ns == "" && currentOp.inprog[0].query["whatsmyuri"]) {
- print("Shell ==== No, we found a \"whatsmyuri\" query, waiting some more");
- return false;
- }
- print(
- "Shell ==== No, we found something other than our query or a \"whatsmyuri\", waiting some more");
- return false;
- }
- return len > 0;
- }, "Wait failed, db.currentOp().inprog never became non-empty", 2000, 1);
-
- print(
- "Shell ==== Now that we have seen db.currentOp().inprog show that our query is running, we start the real test");
- num = 0;
- start = new Date();
- while (((new Date()).getTime() - start) < (time * 2)) {
- if (num == 0) {
- print("Shell ==== Starting loop " + num + ", inserting 1 document");
- }
- insertTime = Date.timeFunc(function() {
- t.insert({x: 1});
- });
- currentOp = db.currentOp();
- len = currentOp.inprog.length;
- print("Shell ==== Time to insert document " + num + " was " + insertTime +
- " ms, db.currentOp().inprog.length is " + len);
- if (num++ == 0) {
- if (len != 1) {
- print("Shell ==== TEST FAILED! db.currentOp().inprog.length is " + len);
- print("Shell ==== Dump of db.currentOp:");
- print(tojson(currentOp));
- throw Error("TEST FAILED!");
- }
- }
- assert.gt(200,
- insertTime,
- "Insert took too long (" + insertTime + " ms), should be less than 200 ms");
- if (currentOp.inprog.length == 0) {
- break;
+ throw Error("TEST FAILED!");
}
}
+ assert.gt(200,
+ insertTime,
+ "Insert took too long (" + insertTime + " ms), should be less than 200 ms");
+ if (currentOp.inprog.length == 0) {
+ break;
+ }
+ }
- print("Shell ==== Finished inserting documents, reader also finished");
- print("Shell ==== Waiting for parallel shell to exit");
- join();
+ print("Shell ==== Finished inserting documents, reader also finished");
+ print("Shell ==== Waiting for parallel shell to exit");
+ join();
- currentOp = db.currentOp();
- len = currentOp.inprog.length;
- if (len != 0) {
- print("Shell ==== Final sanity check FAILED! db.currentOp().inprog.length is " + len);
- print("Shell ==== Dump of db.currentOp:");
- print(tojson(currentOp));
- throw Error("TEST FAILED!");
- }
- print("Shell ==== Test completed successfully, shutting down server");
- MongoRunner.stopMongod(conn);
+ currentOp = db.currentOp();
+ len = currentOp.inprog.length;
+ if (len != 0) {
+ print("Shell ==== Final sanity check FAILED! db.currentOp().inprog.length is " + len);
+ print("Shell ==== Dump of db.currentOp:");
+ print(tojson(currentOp));
+ throw Error("TEST FAILED!");
}
+ print("Shell ==== Test completed successfully, shutting down server");
+ MongoRunner.stopMongod(conn);
+}
})();
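
Much of the machinery above exists to confirm, via db.currentOp(), that the background $where query is actually running before the timed inserts begin. A hedged sketch of just that handshake (namespace is illustrative, and it assumes the collection is populated enough that the query is still in progress when currentOp is polled); note that startParallelShell returns a join function that blocks until the spawned shell exits:

    const join = startParallelShell(
        "db.getSiblingDB('test').query_yield2.find({$where: 'return false;'}).itcount();");
    assert.soon(function() {
        // Wait until an in-progress operation shows up against our namespace.
        return db.currentOp().inprog.some(function(op) {
            return op.ns === "test.query_yield2";
        });
    }, "background query never appeared in db.currentOp()");
    // ... timed writes would go here ...
    join();
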
diff --git a/jstests/noPassthrough/query_yield_reset_timer.js b/jstests/noPassthrough/query_yield_reset_timer.js
index 3bdb81730f7..cd7d9cf7d16 100644
--- a/jstests/noPassthrough/query_yield_reset_timer.js
+++ b/jstests/noPassthrough/query_yield_reset_timer.js
@@ -1,45 +1,45 @@
// Tests the reset logic for the periodic query yield timer. Regression test for SERVER-21341.
(function() {
- 'use strict';
- var dbpath = MongoRunner.dataPath + jsTest.name();
- resetDbpath(dbpath);
- var mongod = MongoRunner.runMongod({dbpath: dbpath});
- var coll = mongod.getDB("test").getCollection(jsTest.name());
+'use strict';
+var dbpath = MongoRunner.dataPath + jsTest.name();
+resetDbpath(dbpath);
+var mongod = MongoRunner.runMongod({dbpath: dbpath});
+var coll = mongod.getDB("test").getCollection(jsTest.name());
- // Configure the server so that queries are expected to yield after every 10 work cycles, or
- // after every 500 milliseconds (whichever comes first). In addition, enable a failpoint that
- // introduces a sleep delay of 1 second during each yield.
- assert.commandWorked(
- coll.getDB().adminCommand({setParameter: 1, internalQueryExecYieldIterations: 10}));
- assert.commandWorked(
- coll.getDB().adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 500}));
- assert.commandWorked(coll.getDB().adminCommand({
- configureFailPoint: "setYieldAllLocksWait",
- namespace: coll.getFullName(),
- mode: "alwaysOn",
- data: {waitForMillis: 1000}
- }));
+// Configure the server so that queries are expected to yield after every 10 work cycles, or
+// after every 500 milliseconds (whichever comes first). In addition, enable a failpoint that
+// introduces a sleep delay of 1 second during each yield.
+assert.commandWorked(
+ coll.getDB().adminCommand({setParameter: 1, internalQueryExecYieldIterations: 10}));
+assert.commandWorked(
+ coll.getDB().adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 500}));
+assert.commandWorked(coll.getDB().adminCommand({
+ configureFailPoint: "setYieldAllLocksWait",
+ namespace: coll.getFullName(),
+ mode: "alwaysOn",
+ data: {waitForMillis: 1000}
+}));
- // Insert 40 documents in the collection, perform a collection scan, and verify that it yields
- // about 4 times. Since each group of 10 documents should always be processed in less than 500
- // milliseconds, we expect to hit only iteration-based yields for this query, and no
- // timing-based yields. 40 documents total divided by 10 documents per yield gives us an
- // estimated yield count of 4 yields.
- //
-    // Note also that we have a 1-second sleep delay during each yield, and we expect this delay not
-    // to change our expectation of hitting zero timing-based yields. Timing-based yields only consider
- // time spent during query execution since the last yield; since our sleep delay of 1 second is
- // not during query execution, it should never count towards our 500 millisecond threshold for a
- // timing-based yield (incorrect accounting for timing-based yields was the cause for
- // SERVER-21341).
- for (var i = 0; i < 40; ++i) {
- assert.writeOK(coll.insert({}));
- }
- var explainRes = coll.find().explain("executionStats");
-    // We expect 4 yields, but we throw in a fudge factor of 2 for test reliability. We can also
- // use "saveState" calls as a proxy for "number of yields" here, because we expect our entire
- // result set to be returned in a single batch.
- assert.gt(explainRes.executionStats.executionStages.saveState, 4 / 2, tojson(explainRes));
- assert.lt(explainRes.executionStats.executionStages.saveState, 4 * 2, tojson(explainRes));
- MongoRunner.stopMongod(mongod);
+// Insert 40 documents in the collection, perform a collection scan, and verify that it yields
+// about 4 times. Since each group of 10 documents should always be processed in less than 500
+// milliseconds, we expect to hit only iteration-based yields for this query, and no
+// timing-based yields. 40 documents total divided by 10 documents per yield gives us an
+// estimated yield count of 4 yields.
+//
+// Note also that we have a 1-second sleep delay during each yield, and we expect this delay not
+// to change our expectation of hitting zero timing-based yields. Timing-based yields only consider
+// time spent during query execution since the last yield; since our sleep delay of 1 second is
+// not during query execution, it should never count towards our 500 millisecond threshold for a
+// timing-based yield (incorrect accounting for timing-based yields was the cause for
+// SERVER-21341).
+for (var i = 0; i < 40; ++i) {
+ assert.writeOK(coll.insert({}));
+}
+var explainRes = coll.find().explain("executionStats");
+// We expect 4 yields, but we throw in a fudge factor of 2 for test reliability. We can also
+// use "saveState" calls as a proxy for "number of yields" here, because we expect our entire
+// result set to be returned in a single batch.
+assert.gt(explainRes.executionStats.executionStages.saveState, 4 / 2, tojson(explainRes));
+assert.lt(explainRes.executionStats.executionStages.saveState, 4 * 2, tojson(explainRes));
+MongoRunner.stopMongod(mongod);
})();
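
The assertions above lean on the fact that every yield forces each stage in the plan tree to save its state, so the executionStages.saveState counter in explain output approximates the number of yields whenever the whole result set is returned in a single batch. Read back roughly like this (coll is the populated collection handle from the test above):

    const explainRes = coll.find().explain("executionStats");
    // saveState counts how many times the root stage saved its state, i.e. roughly one per yield.
    const approxYields = explainRes.executionStats.executionStages.saveState;
    print("approximate yield count: " + approxYields);
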
diff --git a/jstests/noPassthrough/queryable_backup_mode_incompatible_options.js b/jstests/noPassthrough/queryable_backup_mode_incompatible_options.js
index 3c9f09ba38c..d344d2648c2 100644
--- a/jstests/noPassthrough/queryable_backup_mode_incompatible_options.js
+++ b/jstests/noPassthrough/queryable_backup_mode_incompatible_options.js
@@ -9,58 +9,54 @@
// Check that starting mongod with both --queryableBackupMode and --replSet fails.
(function() {
- "use strict";
+"use strict";
- var name = "queryable_backup_mode_repl_set";
- var dbdir = MongoRunner.dataPath + name + "/";
+var name = "queryable_backup_mode_repl_set";
+var dbdir = MongoRunner.dataPath + name + "/";
- resetDbpath(dbdir);
+resetDbpath(dbdir);
- // Insert dummy document to ensure startup failure isn't due to lack of storage metadata file.
- var conn = MongoRunner.runMongod({dbpath: dbdir, noCleanData: true});
- assert.neq(null, conn, "mongod was unable to start up");
+// Insert dummy document to ensure startup failure isn't due to lack of storage metadata file.
+var conn = MongoRunner.runMongod({dbpath: dbdir, noCleanData: true});
+assert.neq(null, conn, "mongod was unable to start up");
- var coll = conn.getCollection('test.foo');
- coll.insertOne({a: 1});
- MongoRunner.stopMongod(conn);
+var coll = conn.getCollection('test.foo');
+coll.insertOne({a: 1});
+MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod(
- {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', replSet: 'bar'});
+conn = MongoRunner.runMongod(
+ {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', replSet: 'bar'});
- assert.eq(
- null,
- conn,
- "mongod should fail to start when both --queryableBackupMode and --replSet are provided");
+assert.eq(null,
+ conn,
+ "mongod should fail to start when both --queryableBackupMode and --replSet are provided");
- conn = MongoRunner.runMongod(
- {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', configsvr: ''});
+conn = MongoRunner.runMongod(
+ {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', configsvr: ''});
- assert.eq(
- null,
- conn,
- "mongod should fail to start when both --queryableBackupMode and --configsvr are provided");
+assert.eq(
+ null,
+ conn,
+ "mongod should fail to start when both --queryableBackupMode and --configsvr are provided");
- conn = MongoRunner.runMongod(
- {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', upgrade: ''});
+conn =
+ MongoRunner.runMongod({dbpath: dbdir, noCleanData: true, queryableBackupMode: '', upgrade: ''});
- assert.eq(
- null,
- conn,
- "mongod should fail to start when both --queryableBackupMode and --upgrade are provided");
+assert.eq(null,
+ conn,
+ "mongod should fail to start when both --queryableBackupMode and --upgrade are provided");
- conn = MongoRunner.runMongod(
- {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', repair: ''});
+conn =
+ MongoRunner.runMongod({dbpath: dbdir, noCleanData: true, queryableBackupMode: '', repair: ''});
- assert.eq(
- null,
- conn,
- "mongod should fail to start when both --queryableBackupMode and --repair are provided");
+assert.eq(null,
+ conn,
+ "mongod should fail to start when both --queryableBackupMode and --repair are provided");
- conn = MongoRunner.runMongod(
- {dbpath: dbdir, noCleanData: true, queryableBackupMode: '', profile: 1});
+conn =
+ MongoRunner.runMongod({dbpath: dbdir, noCleanData: true, queryableBackupMode: '', profile: 1});
- assert.eq(
- null,
- conn,
- "mongod should fail to start when both --queryableBackupMode and --profile are provided");
+assert.eq(null,
+ conn,
+ "mongod should fail to start when both --queryableBackupMode and --profile are provided");
})();
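
Every check above relies on the MongoRunner convention, exercised throughout this file, that runMongod returns the connection on success and null when the process fails to come up; the dummy-document preamble exists so that a failure can only be attributed to the conflicting options rather than to a missing storage metadata file. One such check in isolation (the option pair and replica set name are illustrative):

    const conn = MongoRunner.runMongod({queryableBackupMode: '', replSet: 'rs0'});
    assert.eq(null, conn, "mongod should refuse to start with --queryableBackupMode and --replSet");
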
diff --git a/jstests/noPassthrough/readConcern_atClusterTime.js b/jstests/noPassthrough/readConcern_atClusterTime.js
index 222d7b31ec8..926145d64e5 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime.js
@@ -12,159 +12,142 @@ function _getClusterTime(rst) {
}
(function() {
- "use strict";
+"use strict";
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+const dbName = "test";
+const collName = "coll";
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const testDB = rst.getPrimary().getDB(dbName);
- const dbName = "test";
- const collName = "coll";
+if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
+ rst.stopSet();
+ return;
+}
- const rst = new ReplSetTest({nodes: 1});
+const session = testDB.getMongo().startSession({causalConsistency: false});
+const sessionDb = session.getDatabase(dbName);
+
+const clusterTime = _getClusterTime(rst);
+
+// 'atClusterTime' can be used with readConcern level 'snapshot'.
+session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
+assert.commandWorked(sessionDb.runCommand({find: collName}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// 'atClusterTime' cannot be greater than the current cluster time.
+const futureClusterTime = new Timestamp(clusterTime.getTime() + 1000, 1);
+session.startTransaction({readConcern: {level: "snapshot", atClusterTime: futureClusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' must have type Timestamp.
+session.startTransaction({readConcern: {level: "snapshot", atClusterTime: "bad"}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.TypeMismatch);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used with readConcern level 'majority'.
+session.startTransaction({readConcern: {level: "majority", atClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used with readConcern level 'local'.
+session.startTransaction({readConcern: {level: "local", atClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used with readConcern level 'available'.
+session.startTransaction({readConcern: {level: "available", atClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used with readConcern level 'linearizable'.
+session.startTransaction({readConcern: {level: "linearizable", atClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used without readConcern level (level is 'local' by default).
+session.startTransaction({readConcern: {atClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used with 'afterOpTime'.
+session.startTransaction({
+ readConcern:
+ {level: "snapshot", atClusterTime: clusterTime, afterOpTime: {ts: Timestamp(1, 2), t: 1}}
+});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// 'atClusterTime' cannot be used outside of a session.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {find: collName, readConcern: {level: "snapshot", atClusterTime: clusterTime}}),
+ ErrorCodes.InvalidOptions);
+
+// 'atClusterTime' cannot be used with 'afterClusterTime'.
+session.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: clusterTime, afterClusterTime: clusterTime}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+session.endSession();
+rst.stopSet();
+
+// readConcern with 'atClusterTime' should succeed regardless of value of 'enableTestCommands'.
+{
+ jsTest.setOption('enableTestCommands', false);
+ let rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
- const testDB = rst.getPrimary().getDB(dbName);
-
- if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- rst.stopSet();
- return;
- }
-
- const session = testDB.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase(dbName);
-
- const clusterTime = _getClusterTime(rst);
-
- // 'atClusterTime' can be used with readConcern level 'snapshot'.
- session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
+ let session =
+ rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+ let sessionDb = session.getDatabase(dbName);
+ session.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
assert.commandWorked(sessionDb.runCommand({find: collName}));
assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+ rst.stopSet();
- // 'atClusterTime' cannot be greater than the current cluster time.
- const futureClusterTime = new Timestamp(clusterTime.getTime() + 1000, 1);
- session.startTransaction({readConcern: {level: "snapshot", atClusterTime: futureClusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' must have type Timestamp.
- session.startTransaction({readConcern: {level: "snapshot", atClusterTime: "bad"}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.TypeMismatch);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used with readConcern level 'majority'.
- session.startTransaction({readConcern: {level: "majority", atClusterTime: clusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used with readConcern level 'local'.
- session.startTransaction({readConcern: {level: "local", atClusterTime: clusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used with readConcern level 'available'.
- session.startTransaction({readConcern: {level: "available", atClusterTime: clusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used with readConcern level 'linearizable'.
- session.startTransaction({readConcern: {level: "linearizable", atClusterTime: clusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used without readConcern level (level is 'local' by default).
- session.startTransaction({readConcern: {atClusterTime: clusterTime}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // 'atClusterTime' cannot be used with 'afterOpTime'.
- session.startTransaction({
- readConcern: {
- level: "snapshot",
- atClusterTime: clusterTime,
- afterOpTime: {ts: Timestamp(1, 2), t: 1}
- }
- });
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ jsTest.setOption('enableTestCommands', true);
+ rst = new ReplSetTest({nodes: 1});
+ rst.startSet();
+ rst.initiate();
+ session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+ sessionDb = session.getDatabase(dbName);
+ session.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
+ assert.commandWorked(sessionDb.runCommand({find: collName}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+ rst.stopSet();
+}
- // 'atClusterTime' cannot be used outside of a session.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {find: collName, readConcern: {level: "snapshot", atClusterTime: clusterTime}}),
- ErrorCodes.InvalidOptions);
-
- // 'atClusterTime' cannot be used with 'afterClusterTime'.
- session.startTransaction({
- readConcern:
- {level: "snapshot", atClusterTime: clusterTime, afterClusterTime: clusterTime}
- });
+// readConcern with 'atClusterTime' is not allowed when enableMajorityReadConcern=false.
+{
+ let rst = new ReplSetTest({nodes: [{"enableMajorityReadConcern": "false"}]});
+ rst.startSet();
+ rst.initiate();
+ let session =
+ rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+ let sessionDb = session.getDatabase(dbName);
+ session.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
assert.commandFailedWithCode(session.abortTransaction_forTesting(),
ErrorCodes.NoSuchTransaction);
-
session.endSession();
rst.stopSet();
-
- // readConcern with 'atClusterTime' should succeed regardless of value of 'enableTestCommands'.
- {
- jsTest.setOption('enableTestCommands', false);
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- let session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- let sessionDb = session.getDatabase(dbName);
- session.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
- assert.commandWorked(sessionDb.runCommand({find: collName}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- rst.stopSet();
-
- jsTest.setOption('enableTestCommands', true);
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- sessionDb = session.getDatabase(dbName);
- session.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
- assert.commandWorked(sessionDb.runCommand({find: collName}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- rst.stopSet();
- }
-
- // readConcern with 'atClusterTime' is not allowed when enableMajorityReadConcern=false.
- {
- let rst = new ReplSetTest({nodes: [{"enableMajorityReadConcern": "false"}]});
- rst.startSet();
- rst.initiate();
- let session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- let sessionDb = session.getDatabase(dbName);
- session.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: _getClusterTime(rst)}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- session.endSession();
- rst.stopSet();
- }
-
+}
}());
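
All of the negative cases above only vary the readConcern document; the positive path additionally needs a genuine Timestamp for atClusterTime. One way to obtain one, sketched here with session and sessionDb handles as created in the test above and also used by the noop-write test that follows, is to reuse the operationTime reported by a prior write:

    const res = assert.commandWorked(sessionDb.runCommand({insert: "coll", documents: [{_id: 1}]}));
    const clusterTime = res.operationTime;  // a Timestamp usable as atClusterTime
    session.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
    assert.commandWorked(sessionDb.runCommand({find: "coll"}));
    assert.commandWorked(session.commitTransaction_forTesting());
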
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
index 0a20621ed3e..c065ae258aa 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
@@ -3,107 +3,104 @@
// as an actual opTime on another shard.
// @tags: [requires_sharding, uses_transactions, uses_atclustertime]
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
+"use strict";
+load("jstests/replsets/rslib.js");
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+if (!assert.commandWorked(conn.getDB("test").serverStatus())
+ .storageEngine.supportsSnapshotReadConcern) {
+ MongoRunner.stopMongod(conn);
+ return;
+}
+MongoRunner.stopMongod(conn);
+
+const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
+
+// Create database "test0" on shard 0.
+const testDB0 = st.s.getDB("test0");
+assert.commandWorked(testDB0.adminCommand({enableSharding: testDB0.getName()}));
+st.ensurePrimaryShard(testDB0.getName(), st.shard0.shardName);
+assert.commandWorked(testDB0.createCollection("coll0"));
+
+// Create a database "test1" on shard 1.
+const testDB1 = st.s.getDB("test1");
+assert.commandWorked(testDB1.adminCommand({enableSharding: testDB1.getName()}));
+st.ensurePrimaryShard(testDB1.getName(), st.shard1.shardName);
+assert.commandWorked(testDB1.createCollection("coll1"));
+
+const PropagationPreferenceOptions = Object.freeze({kShard: 0, kConfig: 1});
+
+let testNoopWrite = (fromDbName, fromColl, toRS, toDbName, toColl, propagationPreference) => {
+ const fromDBFromMongos = st.s.getDB(fromDbName);
+ const toDBFromMongos = st.s.getDB(toDbName);
+ const configFromMongos = st.s.getDB("config");
+
+ const oplog = toRS.getPrimary().getCollection("local.oplog.rs");
+ let findRes = oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
+ assert(!findRes);
+
+ // Perform a write on the fromDB and get its op time.
+ let res = assert.commandWorked(
+ fromDBFromMongos.runCommand({insert: fromColl, documents: [{_id: 0}]}));
+ assert(res.hasOwnProperty("operationTime"), tojson(res));
+ let clusterTime = res.operationTime;
+
+ // Propagate 'clusterTime' to toRS or the config server. This ensures that its next
+ // write will be at time >= 'clusterTime'. We cannot use toDBFromMongos to propagate
+ // 'clusterTime' to the config server, because mongos only routes to the config server
+ // for the 'config' and 'admin' databases.
+ if (propagationPreference == PropagationPreferenceOptions.kConfig) {
+ configFromMongos.coll1.find().itcount();
+ } else {
+ toDBFromMongos.toColl.find().itcount();
}
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- if (!assert.commandWorked(conn.getDB("test").serverStatus())
- .storageEngine.supportsSnapshotReadConcern) {
- MongoRunner.stopMongod(conn);
- return;
+ // Attempt a snapshot read at 'clusterTime' on toRS. Test that it performs a noop write
+ // to advance its lastApplied optime past 'clusterTime'. The snapshot read itself may
+ // fail if the noop write advances the node's majority commit point past 'clusterTime'
+ // and it releases that snapshot.
+ const toRSSession =
+ toRS.getPrimary().getDB(toDBFromMongos).getMongo().startSession({causalConsistency: false});
+
+ toRSSession.startTransaction({readConcern: {level: "snapshot", atClusterTime: clusterTime}});
+ res = toRSSession.getDatabase(toDBFromMongos).runCommand({find: toColl});
+ if (res.ok === 0) {
+ assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld);
+ assert.commandFailedWithCode(toRSSession.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ } else {
+ assert.commandWorked(toRSSession.commitTransaction_forTesting());
}
- MongoRunner.stopMongod(conn);
- const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
-
- // Create database "test0" on shard 0.
- const testDB0 = st.s.getDB("test0");
- assert.commandWorked(testDB0.adminCommand({enableSharding: testDB0.getName()}));
- st.ensurePrimaryShard(testDB0.getName(), st.shard0.shardName);
- assert.commandWorked(testDB0.createCollection("coll0"));
-
- // Create a database "test1" on shard 1.
- const testDB1 = st.s.getDB("test1");
- assert.commandWorked(testDB1.adminCommand({enableSharding: testDB1.getName()}));
- st.ensurePrimaryShard(testDB1.getName(), st.shard1.shardName);
- assert.commandWorked(testDB1.createCollection("coll1"));
-
- const PropagationPreferenceOptions = Object.freeze({kShard: 0, kConfig: 1});
-
- let testNoopWrite = (fromDbName, fromColl, toRS, toDbName, toColl, propagationPreference) => {
- const fromDBFromMongos = st.s.getDB(fromDbName);
- const toDBFromMongos = st.s.getDB(toDbName);
- const configFromMongos = st.s.getDB("config");
-
- const oplog = toRS.getPrimary().getCollection("local.oplog.rs");
- let findRes =
- oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
- assert(!findRes);
-
- // Perform a write on the fromDB and get its op time.
- let res = assert.commandWorked(
- fromDBFromMongos.runCommand({insert: fromColl, documents: [{_id: 0}]}));
- assert(res.hasOwnProperty("operationTime"), tojson(res));
- let clusterTime = res.operationTime;
-
- // Propagate 'clusterTime' to toRS or the config server. This ensures that its next
- // write will be at time >= 'clusterTime'. We cannot use toDBFromMongos to propagate
- // 'clusterTime' to the config server, because mongos only routes to the config server
- // for the 'config' and 'admin' databases.
- if (propagationPreference == PropagationPreferenceOptions.kConfig) {
- configFromMongos.coll1.find().itcount();
- } else {
- toDBFromMongos.toColl.find().itcount();
- }
-
- // Attempt a snapshot read at 'clusterTime' on toRS. Test that it performs a noop write
- // to advance its lastApplied optime past 'clusterTime'. The snapshot read itself may
- // fail if the noop write advances the node's majority commit point past 'clusterTime'
- // and it releases that snapshot.
- const toRSSession = toRS.getPrimary().getDB(toDBFromMongos).getMongo().startSession({
- causalConsistency: false
- });
-
- toRSSession.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: clusterTime}});
- res = toRSSession.getDatabase(toDBFromMongos).runCommand({find: toColl});
- if (res.ok === 0) {
- assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld);
- assert.commandFailedWithCode(toRSSession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- } else {
- assert.commandWorked(toRSSession.commitTransaction_forTesting());
- }
-
- const toRSOpTime = getLastOpTime(toRS.getPrimary()).ts;
-
- assert.gte(toRSOpTime, clusterTime);
-
- findRes = oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
- assert(findRes);
- };
-
- //
- // Test noop write. Read from the destination shard.
- //
-
- testNoopWrite("test0", "coll0", st.rs1, "test1", "coll1", PropagationPreferenceOptions.kShard);
-
- //
- // Test noop write. Read from the config server's primary.
- //
-
- testNoopWrite(
- "test0", "coll2", st.configRS, "test1", "coll3", PropagationPreferenceOptions.kConfig);
-
- st.stop();
+ const toRSOpTime = getLastOpTime(toRS.getPrimary()).ts;
+
+ assert.gte(toRSOpTime, clusterTime);
+
+ findRes = oplog.findOne({o: {$eq: {"noop write for afterClusterTime read concern": 1}}});
+ assert(findRes);
+};
+
+//
+// Test noop write. Read from the destination shard.
+//
+
+testNoopWrite("test0", "coll0", st.rs1, "test1", "coll1", PropagationPreferenceOptions.kShard);
+
+//
+// Test noop write. Read from the config server's primary.
+//
+
+testNoopWrite(
+ "test0", "coll2", st.configRS, "test1", "coll3", PropagationPreferenceOptions.kConfig);
+
+st.stop();
}());
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js b/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js
index e9e92c88da1..49b3b16da66 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime_snapshot_selection.js
@@ -4,93 +4,91 @@
//
// @tags: [uses_transactions, requires_majority_read_concern]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // For stopServerReplication.
+load("jstests/libs/write_concern_util.js"); // For stopServerReplication.
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const rst = new ReplSetTest({nodes: 3, settings: {chainingAllowed: false}});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 3, settings: {chainingAllowed: false}});
+rst.startSet();
+rst.initiate();
- const primarySession =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- const primaryDB = primarySession.getDatabase(dbName);
+const primarySession =
+ rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+const primaryDB = primarySession.getDatabase(dbName);
- const secondaryConns = rst.getSecondaries();
- const secondaryConn0 = secondaryConns[0];
- const secondaryConn1 = secondaryConns[1];
- const secondarySession =
- secondaryConn0.getDB(dbName).getMongo().startSession({causalConsistency: false});
- const secondaryDB0 = secondarySession.getDatabase(dbName);
+const secondaryConns = rst.getSecondaries();
+const secondaryConn0 = secondaryConns[0];
+const secondaryConn1 = secondaryConns[1];
+const secondarySession =
+ secondaryConn0.getDB(dbName).getMongo().startSession({causalConsistency: false});
+const secondaryDB0 = secondarySession.getDatabase(dbName);
- // Create the collection and insert one document. Get the op time of the write.
- let res = assert.commandWorked(primaryDB.runCommand(
- {insert: collName, documents: [{_id: "before"}], writeConcern: {w: "majority"}}));
- const clusterTimePrimaryBefore = res.opTime.ts;
+// Create the collection and insert one document. Get the op time of the write.
+let res = assert.commandWorked(primaryDB.runCommand(
+ {insert: collName, documents: [{_id: "before"}], writeConcern: {w: "majority"}}));
+const clusterTimePrimaryBefore = res.opTime.ts;
- // Wait for the majority commit point on 'secondaryDB0' to include the {_id: "before"} write.
- assert.soonNoExcept(function() {
- return assert
- .commandWorked(secondaryDB0.runCommand(
- {find: collName, readConcern: {level: "majority"}, maxTimeMS: 10000}))
- .cursor.firstBatch.length === 1;
- });
+// Wait for the majority commit point on 'secondaryDB0' to include the {_id: "before"} write.
+assert.soonNoExcept(function() {
+ return assert
+ .commandWorked(secondaryDB0.runCommand(
+ {find: collName, readConcern: {level: "majority"}, maxTimeMS: 10000}))
+ .cursor.firstBatch.length === 1;
+});
- // Stop replication on both secondaries.
- stopServerReplication(secondaryConn0);
- stopServerReplication(secondaryConn1);
+// Stop replication on both secondaries.
+stopServerReplication(secondaryConn0);
+stopServerReplication(secondaryConn1);
- // Perform write and get the op time of the write.
- res =
- assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{_id: "after"}]}));
- assert(res.hasOwnProperty("opTime"), tojson(res));
- assert(res.opTime.hasOwnProperty("ts"), tojson(res));
- let clusterTimeAfter = res.opTime.ts;
+// Perform write and get the op time of the write.
+res = assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{_id: "after"}]}));
+assert(res.hasOwnProperty("opTime"), tojson(res));
+assert(res.opTime.hasOwnProperty("ts"), tojson(res));
+let clusterTimeAfter = res.opTime.ts;
- // A read on the primary at the old cluster time should not include the write.
- primarySession.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: clusterTimePrimaryBefore}});
- res = assert.commandWorked(primaryDB.runCommand({find: collName}));
- assert.commandWorked(primarySession.commitTransaction_forTesting());
- assert.eq(res.cursor.firstBatch.length, 1, printjson(res));
- assert.eq(res.cursor.firstBatch[0]._id, "before", printjson(res));
+// A read on the primary at the old cluster time should not include the write.
+primarySession.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: clusterTimePrimaryBefore}});
+res = assert.commandWorked(primaryDB.runCommand({find: collName}));
+assert.commandWorked(primarySession.commitTransaction_forTesting());
+assert.eq(res.cursor.firstBatch.length, 1, printjson(res));
+assert.eq(res.cursor.firstBatch[0]._id, "before", printjson(res));
- // A read on the primary at the new cluster time should succeed because transactions implement
- // speculative behavior, but the attempt to commit the transaction should time out waiting for
- // the transaction to be majority committed.
- primarySession.startTransaction({
- readConcern: {level: "snapshot", atClusterTime: clusterTimeAfter},
- writeConcern: {w: "majority", wtimeout: 1000}
- });
- res = assert.commandWorked(primaryDB.runCommand({find: collName}));
- assert.eq(res.cursor.firstBatch.length, 2, printjson(res));
- assert.commandFailedWithCode(primarySession.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
+// A read on the primary at the new cluster time should succeed because transactions implement
+// speculative behavior, but the attempt to commit the transaction should time out waiting for
+// the transaction to be majority committed.
+primarySession.startTransaction({
+ readConcern: {level: "snapshot", atClusterTime: clusterTimeAfter},
+ writeConcern: {w: "majority", wtimeout: 1000}
+});
+res = assert.commandWorked(primaryDB.runCommand({find: collName}));
+assert.eq(res.cursor.firstBatch.length, 2, printjson(res));
+assert.commandFailedWithCode(primarySession.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
- // A read on the primary at the new cluster time succeeds.
- primarySession.startTransaction({
- readConcern: {level: "snapshot", atClusterTime: clusterTimeAfter},
- writeConcern: {w: "majority"}
- });
- res = assert.commandWorked(primaryDB.runCommand({find: collName}));
- assert.eq(res.cursor.firstBatch.length, 2, printjson(res));
- // Restart replication on one of the secondaries.
- restartServerReplication(secondaryConn1);
- // This time the transaction should commit.
- assert.commandWorked(primarySession.commitTransaction_forTesting());
+// A read on the primary at the new cluster time succeeds.
+primarySession.startTransaction({
+ readConcern: {level: "snapshot", atClusterTime: clusterTimeAfter},
+ writeConcern: {w: "majority"}
+});
+res = assert.commandWorked(primaryDB.runCommand({find: collName}));
+assert.eq(res.cursor.firstBatch.length, 2, printjson(res));
+// Restart replication on one of the secondaries.
+restartServerReplication(secondaryConn1);
+// This time the transaction should commit.
+assert.commandWorked(primarySession.commitTransaction_forTesting());
- // Restart replication on the lagged secondary.
- restartServerReplication(secondaryConn0);
+// Restart replication on the lagged secondary.
+restartServerReplication(secondaryConn0);
- // A read at a time that is too old fails.
- primarySession.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)}});
- assert.commandFailedWithCode(primaryDB.runCommand({find: collName}), ErrorCodes.SnapshotTooOld);
- assert.commandFailedWithCode(primarySession.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+// A read at a time that is too old fails.
+primarySession.startTransaction({readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)}});
+assert.commandFailedWithCode(primaryDB.runCommand({find: collName}), ErrorCodes.SnapshotTooOld);
+assert.commandFailedWithCode(primarySession.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/readConcern_snapshot.js b/jstests/noPassthrough/readConcern_snapshot.js
index 63413d16820..0416cd689ce 100644
--- a/jstests/noPassthrough/readConcern_snapshot.js
+++ b/jstests/noPassthrough/readConcern_snapshot.js
@@ -1,138 +1,134 @@
// Test parsing of readConcern level 'snapshot'.
// @tags: [requires_replication, uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- //
- // Configurations.
- //
-
- // Transactions should fail on storage engines that do not support them.
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- let session =
- rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- let sessionDb = session.getDatabase(dbName);
- if (!sessionDb.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- // Transactions with readConcern snapshot fail.
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- [ErrorCodes.NoSuchTransaction, ErrorCodes.IllegalOperation]);
-
- // Transactions without readConcern snapshot fail.
- session.startTransaction();
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
- ErrorCodes.IllegalOperation);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- [ErrorCodes.NoSuchTransaction, ErrorCodes.IllegalOperation]);
-
- rst.stopSet();
- return;
- }
- session.endSession();
- rst.stopSet();
-
- // readConcern 'snapshot' is not allowed on a standalone.
- const conn = MongoRunner.runMongod();
- session = conn.startSession({causalConsistency: false});
- sessionDb = session.getDatabase(dbName);
- assert.neq(null, conn, "mongod was unable to start up");
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+//
+// Configurations.
+//
+
+// Transactions should fail on storage engines that do not support them.
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+let session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+let sessionDb = session.getDatabase(dbName);
+if (!sessionDb.serverStatus().storageEngine.supportsSnapshotReadConcern) {
+ // Transactions with readConcern snapshot fail.
session.startTransaction({readConcern: {level: "snapshot"}});
assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
ErrorCodes.IllegalOperation);
assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.IllegalOperation);
- session.endSession();
- MongoRunner.stopMongod(conn);
-
- // readConcern 'snapshot' is allowed on a replica set primary.
- rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
- assert.commandWorked(rst.getPrimary().getDB(dbName).runCommand(
- {create: collName, writeConcern: {w: "majority"}}));
- session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
- sessionDb = session.getDatabase(dbName);
- session.startTransaction({writeConcern: {w: "majority"}, readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDb.coll.insert({}));
- assert.commandWorked(sessionDb.runCommand({find: collName}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // readConcern 'snapshot' is allowed with 'afterClusterTime'.
+ [ErrorCodes.NoSuchTransaction, ErrorCodes.IllegalOperation]);
+
+ // Transactions without readConcern snapshot fail.
session.startTransaction();
- let pingRes = assert.commandWorked(rst.getPrimary().adminCommand({ping: 1}));
- assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
- assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
- assert.commandWorked(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot", afterClusterTime: pingRes.$clusterTime.clusterTime}
- }));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // readConcern 'snapshot' is not allowed with 'afterOpTime'.
- session.startTransaction(
- {readConcern: {level: "snapshot", afterOpTime: {ts: Timestamp(1, 2), t: 1}}});
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(sessionDb.runCommand({find: collName}),
+ ErrorCodes.IllegalOperation);
assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- session.endSession();
-
- pingRes = assert.commandWorked(rst.getSecondary().adminCommand({ping: 1}));
- assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
- assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
-
- session.startTransaction(
- {readConcern: {level: "snapshot", afterClusterTime: pingRes.$clusterTime.clusterTime}});
- assert.commandWorked(sessionDb.runCommand({find: collName}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- session.endSession();
- rst.stopSet();
+ [ErrorCodes.NoSuchTransaction, ErrorCodes.IllegalOperation]);
- //
- // Commands.
- //
-
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- let testDB = rst.getPrimary().getDB(dbName);
- let coll = testDB.coll;
- assert.commandWorked(coll.createIndex({geo: "2d"}));
- assert.commandWorked(testDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}],
- writeConcern: {w: "majority"}
- }));
-
- session = testDB.getMongo().startSession({causalConsistency: false});
- sessionDb = session.getDatabase(dbName);
-
- // readConcern 'snapshot' is supported by find.
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionDb.runCommand({find: collName}));
-
- // readConcern 'snapshot' is supported by aggregate.
- assert.commandWorked(sessionDb.runCommand({aggregate: collName, pipeline: [], cursor: {}}));
-
- // readConcern 'snapshot' is supported by distinct.
- assert.commandWorked(sessionDb.runCommand({distinct: collName, key: "x"}));
-
- // readConcern 'snapshot' is supported by geoSearch.
- assert.commandWorked(
- sessionDb.runCommand({geoSearch: collName, near: [0, 0], maxDistance: 1, search: {a: 1}}));
-
- // readConcern 'snapshot' is not supported by non-CRUD commands.
- assert.commandFailedWithCode(
- sessionDb.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandWorked(session.abortTransaction_forTesting());
- session.endSession();
rst.stopSet();
+ return;
+}
+session.endSession();
+rst.stopSet();
+
+// readConcern 'snapshot' is not allowed on a standalone.
+const conn = MongoRunner.runMongod();
+session = conn.startSession({causalConsistency: false});
+sessionDb = session.getDatabase(dbName);
+assert.neq(null, conn, "mongod was unable to start up");
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.IllegalOperation);
+session.endSession();
+MongoRunner.stopMongod(conn);
+
+// readConcern 'snapshot' is allowed on a replica set primary.
+rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+assert.commandWorked(
+ rst.getPrimary().getDB(dbName).runCommand({create: collName, writeConcern: {w: "majority"}}));
+session = rst.getPrimary().getDB(dbName).getMongo().startSession({causalConsistency: false});
+sessionDb = session.getDatabase(dbName);
+session.startTransaction({writeConcern: {w: "majority"}, readConcern: {level: "snapshot"}});
+assert.commandWorked(sessionDb.coll.insert({}));
+assert.commandWorked(sessionDb.runCommand({find: collName}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// readConcern 'snapshot' is allowed with 'afterClusterTime'.
+session.startTransaction();
+let pingRes = assert.commandWorked(rst.getPrimary().adminCommand({ping: 1}));
+assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
+assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
+assert.commandWorked(sessionDb.runCommand({
+ find: collName,
+ readConcern: {level: "snapshot", afterClusterTime: pingRes.$clusterTime.clusterTime}
+}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// readConcern 'snapshot' is not allowed with 'afterOpTime'.
+session.startTransaction(
+ {readConcern: {level: "snapshot", afterOpTime: {ts: Timestamp(1, 2), t: 1}}});
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.InvalidOptions);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+session.endSession();
+
+pingRes = assert.commandWorked(rst.getSecondary().adminCommand({ping: 1}));
+assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
+assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
+
+session.startTransaction(
+ {readConcern: {level: "snapshot", afterClusterTime: pingRes.$clusterTime.clusterTime}});
+assert.commandWorked(sessionDb.runCommand({find: collName}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+session.endSession();
+rst.stopSet();
+
+//
+// Commands.
+//
+
+rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+let testDB = rst.getPrimary().getDB(dbName);
+let coll = testDB.coll;
+assert.commandWorked(coll.createIndex({geo: "2d"}));
+assert.commandWorked(testDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}],
+ writeConcern: {w: "majority"}
+}));
+
+session = testDB.getMongo().startSession({causalConsistency: false});
+sessionDb = session.getDatabase(dbName);
+
+// readConcern 'snapshot' is supported by find.
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+assert.commandWorked(sessionDb.runCommand({find: collName}));
+
+// readConcern 'snapshot' is supported by aggregate.
+assert.commandWorked(sessionDb.runCommand({aggregate: collName, pipeline: [], cursor: {}}));
+
+// readConcern 'snapshot' is supported by distinct.
+assert.commandWorked(sessionDb.runCommand({distinct: collName, key: "x"}));
+
+// readConcern 'snapshot' is supported by geoSearch.
+assert.commandWorked(
+ sessionDb.runCommand({geoSearch: collName, near: [0, 0], maxDistance: 1, search: {a: 1}}));
+
+// readConcern 'snapshot' is not supported by non-CRUD commands.
+assert.commandFailedWithCode(
+ sessionDb.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandWorked(session.abortTransaction_forTesting());
+session.endSession();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/readConcern_snapshot_mongos.js b/jstests/noPassthrough/readConcern_snapshot_mongos.js
index 74a2e6e0ffe..ab346a12937 100644
--- a/jstests/noPassthrough/readConcern_snapshot_mongos.js
+++ b/jstests/noPassthrough/readConcern_snapshot_mongos.js
@@ -1,131 +1,130 @@
// Test parsing of readConcern level 'snapshot' on mongos.
// @tags: [requires_replication,requires_sharding, uses_transactions, uses_atclustertime]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- // Runs the command as the first in a multi statement txn that is aborted right after, expecting
- // success.
- function expectSuccessInTxnThenAbort(session, sessionConn, cmdObj) {
- session.startTransaction();
- assert.commandWorked(sessionConn.runCommand(cmdObj));
- assert.commandWorked(session.abortTransaction_forTesting());
- }
-
- // Runs the command as the first in a multi statement txn that is aborted right after, expecting
- // failure with the given error code.
- function expectFailInTxnThenAbort(session, sessionConn, expectedErrorCode, cmdObj) {
- session.startTransaction();
- assert.commandFailedWithCode(sessionConn.runCommand(cmdObj), expectedErrorCode);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- }
-
- const dbName = "test";
- const collName = "coll";
-
- let st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2, mongos: 1});
- let testDB = st.getDB(dbName);
- let coll = testDB.coll;
-
- // Insert data to create the collection.
- assert.writeOK(testDB[collName].insert({x: 1}));
-
- flushRoutersAndRefreshShardMetadata(st, {ns: dbName + "." + collName, dbNames: [dbName]});
-
- // noPassthrough tests
-
- // readConcern 'snapshot' is not allowed outside session context.
- assert.commandFailedWithCode(
- testDB.runCommand({find: collName, readConcern: {level: "snapshot"}}),
- ErrorCodes.InvalidOptions);
-
- let session = testDB.getMongo().startSession({causalConsistency: false});
- let sessionDb = session.getDatabase(dbName);
-
- // readConcern 'snapshot' is not allowed outside transaction context.
- assert.commandFailedWithCode(sessionDb.runCommand({
- find: collName,
- readConcern: {level: "snapshot"},
- }),
- ErrorCodes.InvalidOptions);
-
- // readConcern 'snapshot' is not allowed with 'atClusterTime'.
- let pingRes = assert.commandWorked(st.s0.adminCommand({ping: 1}));
- assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
- assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
- const clusterTime = pingRes.$clusterTime.clusterTime;
-
- expectFailInTxnThenAbort(session, sessionDb, ErrorCodes.InvalidOptions, {
- find: collName,
- readConcern: {level: "snapshot", atClusterTime: clusterTime},
- });
-
- // Passthrough tests. There are parts not implemented on mongod and mongos, they are tracked by
- // separate jiras
-
- // readConcern 'snapshot' is supported by insert on mongos in a transaction.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- insert: collName,
- documents: [{_id: "single-insert"}],
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is supported by update on mongos in a transaction.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- update: collName,
- updates: [{q: {_id: 0}, u: {$inc: {a: 1}}}],
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is supported by delete on mongos in a transaction.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- delete: collName,
- deletes: [{q: {}, limit: 1}],
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is supported by findAndModify on mongos in a transaction.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- findAndModify: collName,
- query: {},
- update: {$set: {a: 1}},
- readConcern: {level: "snapshot"},
- });
-
- expectSuccessInTxnThenAbort(session, sessionDb, {
- aggregate: collName,
- pipeline: [],
- cursor: {},
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is supported by find on mongos.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- find: collName,
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is supported by distinct on mongos.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- distinct: collName,
- key: "x",
- readConcern: {level: "snapshot"},
- });
-
- // readConcern 'snapshot' is allowed with 'afterClusterTime'.
- expectSuccessInTxnThenAbort(session, sessionDb, {
- find: collName,
- readConcern: {level: "snapshot", afterClusterTime: clusterTime},
- });
-
- expectSuccessInTxnThenAbort(session, sessionDb, {
- aggregate: collName,
- pipeline: [],
- cursor: {},
- readConcern: {level: "snapshot", afterClusterTime: clusterTime},
- });
-
- st.stop();
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+// Runs the command as the first in a multi-statement txn that is aborted right after, expecting
+// success.
+function expectSuccessInTxnThenAbort(session, sessionConn, cmdObj) {
+ session.startTransaction();
+ assert.commandWorked(sessionConn.runCommand(cmdObj));
+ assert.commandWorked(session.abortTransaction_forTesting());
+}
+
+// Runs the command as the first in a multi-statement txn that is aborted right after, expecting
+// failure with the given error code.
+function expectFailInTxnThenAbort(session, sessionConn, expectedErrorCode, cmdObj) {
+ session.startTransaction();
+ assert.commandFailedWithCode(sessionConn.runCommand(cmdObj), expectedErrorCode);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+}
+
+const dbName = "test";
+const collName = "coll";
+
+let st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2, mongos: 1});
+let testDB = st.getDB(dbName);
+let coll = testDB.coll;
+
+// Insert data to create the collection.
+assert.writeOK(testDB[collName].insert({x: 1}));
+
+flushRoutersAndRefreshShardMetadata(st, {ns: dbName + "." + collName, dbNames: [dbName]});
+
+// noPassthrough tests
+
+// readConcern 'snapshot' is not allowed outside session context.
+assert.commandFailedWithCode(testDB.runCommand({find: collName, readConcern: {level: "snapshot"}}),
+ ErrorCodes.InvalidOptions);
+
+let session = testDB.getMongo().startSession({causalConsistency: false});
+let sessionDb = session.getDatabase(dbName);
+
+// readConcern 'snapshot' is not allowed outside transaction context.
+assert.commandFailedWithCode(sessionDb.runCommand({
+ find: collName,
+ readConcern: {level: "snapshot"},
+}),
+ ErrorCodes.InvalidOptions);
+
+// readConcern 'snapshot' is not allowed with 'atClusterTime'.
+let pingRes = assert.commandWorked(st.s0.adminCommand({ping: 1}));
+assert(pingRes.hasOwnProperty("$clusterTime"), tojson(pingRes));
+assert(pingRes.$clusterTime.hasOwnProperty("clusterTime"), tojson(pingRes));
+const clusterTime = pingRes.$clusterTime.clusterTime;
+
+expectFailInTxnThenAbort(session, sessionDb, ErrorCodes.InvalidOptions, {
+ find: collName,
+ readConcern: {level: "snapshot", atClusterTime: clusterTime},
+});
+
+// Passthrough tests. Some parts are not yet implemented on mongod and mongos; they are tracked
+// by separate JIRA tickets.
+
+// readConcern 'snapshot' is supported by insert on mongos in a transaction.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ insert: collName,
+ documents: [{_id: "single-insert"}],
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is supported by update on mongos in a transaction.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ update: collName,
+ updates: [{q: {_id: 0}, u: {$inc: {a: 1}}}],
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is supported by delete on mongos in a transaction.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ delete: collName,
+ deletes: [{q: {}, limit: 1}],
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is supported by findAndModify on mongos in a transaction.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ findAndModify: collName,
+ query: {},
+ update: {$set: {a: 1}},
+ readConcern: {level: "snapshot"},
+});
+
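+// readConcern 'snapshot' is supported by aggregate on mongos in a transaction.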
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ aggregate: collName,
+ pipeline: [],
+ cursor: {},
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is supported by find on mongos.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ find: collName,
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is supported by distinct on mongos.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ distinct: collName,
+ key: "x",
+ readConcern: {level: "snapshot"},
+});
+
+// readConcern 'snapshot' is allowed with 'afterClusterTime'.
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ find: collName,
+ readConcern: {level: "snapshot", afterClusterTime: clusterTime},
+});
+
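+// readConcern 'snapshot' with 'afterClusterTime' is supported by aggregate on mongos.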
+expectSuccessInTxnThenAbort(session, sessionDb, {
+ aggregate: collName,
+ pipeline: [],
+ cursor: {},
+ readConcern: {level: "snapshot", afterClusterTime: clusterTime},
+});
+
+st.stop();
}());
diff --git a/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js b/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js
index e92126186fb..22eaa2fbf89 100644
--- a/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js
+++ b/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js
@@ -2,46 +2,45 @@
//
// @tags: [requires_sharding]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- // Runs multiple commands with read concern level "snapshot" in a session,
- // expecting success.
- function expectSnapshotReadConcernIsSupported() {
- const st = new ShardingTest({shards: 1, config: 1});
- const session = st.s.startSession({causalConsistency: false});
- let txnNumber = 0;
-
- assert.commandWorked(session.getDatabase(dbName).runCommand({
- find: collName,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++),
- startTransaction: true,
- autocommit: false
- }));
-
- assert.commandWorked(session.getDatabase(dbName).runCommand({
- aggregate: collName,
- pipeline: [],
- cursor: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber++),
- startTransaction: true,
- autocommit: false
- }));
-
- session.endSession();
- st.stop();
- }
-
- // Snapshot readConcern should succeed when 'enableTestCommands' is set to false.
- jsTest.setOption("enableTestCommands", false);
- expectSnapshotReadConcernIsSupported();
-
- // Snapshot readConcern should succeed when 'enableTestCommands' is set to true.
- jsTest.setOption("enableTestCommands", true);
- expectSnapshotReadConcernIsSupported();
-
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+// Runs multiple commands with read concern level "snapshot" in a session,
+// expecting success.
+function expectSnapshotReadConcernIsSupported() {
+ const st = new ShardingTest({shards: 1, config: 1});
+ const session = st.s.startSession({causalConsistency: false});
+ let txnNumber = 0;
+
+ assert.commandWorked(session.getDatabase(dbName).runCommand({
+ find: collName,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber++),
+ startTransaction: true,
+ autocommit: false
+ }));
+
+ assert.commandWorked(session.getDatabase(dbName).runCommand({
+ aggregate: collName,
+ pipeline: [],
+ cursor: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber++),
+ startTransaction: true,
+ autocommit: false
+ }));
+
+ session.endSession();
+ st.stop();
+}
+
+// Snapshot readConcern should succeed when 'enableTestCommands' is set to false.
+jsTest.setOption("enableTestCommands", false);
+expectSnapshotReadConcernIsSupported();
+
+// Snapshot readConcern should succeed when 'enableTestCommands' is set to true.
+jsTest.setOption("enableTestCommands", true);
+expectSnapshotReadConcernIsSupported();
}());
diff --git a/jstests/noPassthrough/read_concern_helper.js b/jstests/noPassthrough/read_concern_helper.js
index b83b48bdf34..d8cb159b0c7 100644
--- a/jstests/noPassthrough/read_concern_helper.js
+++ b/jstests/noPassthrough/read_concern_helper.js
@@ -1,27 +1,27 @@
// This tests readConcern handling for the find/findOne shell helpers.
// @tags: [requires_majority_read_concern]
(function() {
- "use strict";
- var testServer = MongoRunner.runMongod();
- if (!testServer.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- MongoRunner.stopMongod(testServer);
- return;
- }
- var coll = testServer.getDB("test").readMajority;
+"use strict";
+var testServer = MongoRunner.runMongod();
+if (!testServer.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ MongoRunner.stopMongod(testServer);
+ return;
+}
+var coll = testServer.getDB("test").readMajority;
- assert.doesNotThrow(function() {
- coll.find({_id: "foo"}).readConcern("majority").itcount();
- });
- assert.doesNotThrow(function() {
- coll.findOne({_id: "foo"}, {}, {}, "majority");
- });
- assert.doesNotThrow(function() {
- coll.count({_id: "foo"}, {readConcern: "majority"});
- });
- assert.doesNotThrow(function() {
- coll.find({_id: "foo"}).readConcern("majority").count();
- });
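+// None of the shell helpers should throw when readConcern 'majority' is requested.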
+assert.doesNotThrow(function() {
+ coll.find({_id: "foo"}).readConcern("majority").itcount();
+});
+assert.doesNotThrow(function() {
+ coll.findOne({_id: "foo"}, {}, {}, "majority");
+});
+assert.doesNotThrow(function() {
+ coll.count({_id: "foo"}, {readConcern: "majority"});
+});
+assert.doesNotThrow(function() {
+ coll.find({_id: "foo"}).readConcern("majority").count();
+});
- MongoRunner.stopMongod(testServer);
+MongoRunner.stopMongod(testServer);
}());
diff --git a/jstests/noPassthrough/read_concern_snapshot_aggregation.js b/jstests/noPassthrough/read_concern_snapshot_aggregation.js
index 9c36b6ebf2e..2cb5fe26fd6 100644
--- a/jstests/noPassthrough/read_concern_snapshot_aggregation.js
+++ b/jstests/noPassthrough/read_concern_snapshot_aggregation.js
@@ -4,129 +4,129 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- const kAdminDB = "admin";
- const kCollName = "coll";
- const kConfigDB = "config";
- const kDBName = "test";
- const kWCMajority = {writeConcern: {w: "majority"}};
+const kAdminDB = "admin";
+const kCollName = "coll";
+const kConfigDB = "config";
+const kDBName = "test";
+const kWCMajority = {
+ writeConcern: {w: "majority"}
+};
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- let session =
- rst.getPrimary().getDB(kDBName).getMongo().startSession({causalConsistency: false});
- let sessionDB = session.getDatabase(kDBName);
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+let session = rst.getPrimary().getDB(kDBName).getMongo().startSession({causalConsistency: false});
+let sessionDB = session.getDatabase(kDBName);
- let txnNumber = NumberLong(0);
- assert.commandWorked(sessionDB.runCommand({create: kCollName, writeConcern: {w: "majority"}}));
+let txnNumber = NumberLong(0);
+assert.commandWorked(sessionDB.runCommand({create: kCollName, writeConcern: {w: "majority"}}));
- function testSnapshotAggFailsWithCode(coll, pipeline, code) {
- let cmd = {aggregate: coll, pipeline: pipeline, cursor: {}};
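+// Runs 'pipeline' against 'coll' as the first command of a snapshot transaction and asserts that
+// it fails with the error 'code'.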
+function testSnapshotAggFailsWithCode(coll, pipeline, code) {
+ let cmd = {aggregate: coll, pipeline: pipeline, cursor: {}};
- let cmdAsSnapshotRead = Object.extend({}, cmd);
- cmdAsSnapshotRead.txnNumber = NumberLong(++txnNumber);
- cmdAsSnapshotRead.readConcern = {level: "snapshot"};
- cmdAsSnapshotRead.autocommit = false;
- cmdAsSnapshotRead.startTransaction = true;
- assert.commandFailedWithCode(sessionDB.runCommand(cmdAsSnapshotRead), code);
+ let cmdAsSnapshotRead = Object.extend({}, cmd);
+ cmdAsSnapshotRead.txnNumber = NumberLong(++txnNumber);
+ cmdAsSnapshotRead.readConcern = {level: "snapshot"};
+ cmdAsSnapshotRead.autocommit = false;
+ cmdAsSnapshotRead.startTransaction = true;
+ assert.commandFailedWithCode(sessionDB.runCommand(cmdAsSnapshotRead), code);
- // As a sanity check, also make sure that the command succeeds when run without a txn number
- // and without a readConcern.
- assert.commandWorked(sessionDB.runCommand(cmd));
- }
+ // As a sanity check, also make sure that the command succeeds when run without a txn number
+ // and without a readConcern.
+ assert.commandWorked(sessionDB.runCommand(cmd));
+}
- // Test that $changeStream is disallowed with transactions.
- // TODO SERVER-37221: Remove the check for 'supportsCommittedReads'.
- if (sessionDB.serverStatus().storageEngine.supportsCommittedReads) {
- testSnapshotAggFailsWithCode(
- kCollName, [{$changeStream: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- }
-
- // Test that $collStats is disallowed with transactions.
- testSnapshotAggFailsWithCode(
- kCollName, [{$collStats: {}}], ErrorCodes.OperationNotSupportedInTransaction);
-
- // Test that $indexStats is disallowed with transactions.
+// Test that $changeStream is disallowed with transactions.
+// TODO SERVER-37221: Remove the check for 'supportsCommittedReads'.
+if (sessionDB.serverStatus().storageEngine.supportsCommittedReads) {
testSnapshotAggFailsWithCode(
- kCollName, [{$indexStats: {}}], ErrorCodes.OperationNotSupportedInTransaction);
+ kCollName, [{$changeStream: {}}], ErrorCodes.OperationNotSupportedInTransaction);
+}
- // Test that $listLocalSessions is disallowed with transactions.
- testSnapshotAggFailsWithCode(
- 1, [{$listLocalSessions: {}}], ErrorCodes.OperationNotSupportedInTransaction);
+// Test that $collStats is disallowed with transactions.
+testSnapshotAggFailsWithCode(
+ kCollName, [{$collStats: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- // Test that $out is disallowed with transactions.
- testSnapshotAggFailsWithCode(
- kCollName, [{$out: "out"}], ErrorCodes.OperationNotSupportedInTransaction);
+// Test that $indexStats is disallowed with transactions.
+testSnapshotAggFailsWithCode(
+ kCollName, [{$indexStats: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- // Test that $listSessions is disallowed with transactions. This stage must be run against
- // 'system.sessions' in the config database, which cannot be queried in a transaction.
- sessionDB = session.getDatabase(kConfigDB);
- testSnapshotAggFailsWithCode(
- "system.sessions", [{$listSessions: {}}], ErrorCodes.OperationNotSupportedInTransaction);
+// Test that $listLocalSessions is disallowed with transactions.
+testSnapshotAggFailsWithCode(
+ 1, [{$listLocalSessions: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- // Test that $currentOp is disallowed with transactions. We have to reassign 'sessionDB' to
- // refer to the admin database, because $currentOp pipelines are required to run against
- // 'admin'. Queries against 'admin' are not permitted in a transaction.
- sessionDB = session.getDatabase(kAdminDB);
- testSnapshotAggFailsWithCode(
- 1, [{$currentOp: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- sessionDB = session.getDatabase(kDBName);
+// Test that $out is disallowed with transactions.
+testSnapshotAggFailsWithCode(
+ kCollName, [{$out: "out"}], ErrorCodes.OperationNotSupportedInTransaction);
- // Helper for testing that aggregation stages which involve a local and foreign collection
- // ($lookup and $graphLookup) obey the expected readConcern "snapshot" isolation semantics.
- //
- // Inserts 'localDocsPre' into the 'local' collection and 'foreignDocsPre' into the 'foreign'
- // collection. Then runs the first batch of 'pipeline', before inserting 'localDocsPost' into
- // 'local' and 'foreignDocsPost' into 'foreign'. Iterates the remainder of the aggregation
- // cursor and verifies that the result set matches 'expectedResults'.
- function testLookupReadConcernSnapshotIsolation(
- {localDocsPre, foreignDocsPre, localDocsPost, foreignDocsPost, pipeline, expectedResults}) {
- sessionDB.runCommand({drop: "local", writeConcern: {w: "majority"}});
- sessionDB.runCommand({drop: "foreign", writeConcern: {w: "majority"}});
- let localColl = sessionDB.local;
- let foreignColl = sessionDB.foreign;
- assert.commandWorked(localColl.insert(localDocsPre, kWCMajority));
- assert.commandWorked(foreignColl.insert(foreignDocsPre, kWCMajority));
- let cmdRes = sessionDB.runCommand({
- aggregate: localColl.getName(),
- pipeline: pipeline,
- cursor: {batchSize: 0},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(++txnNumber),
- startTransaction: true,
- autocommit: false
- });
- assert.commandWorked(cmdRes);
- assert.neq(0, cmdRes.cursor.id);
- assert.eq(0, cmdRes.cursor.firstBatch.length);
+// Test that $listSessions is disallowed with transactions. This stage must be run against
+// 'system.sessions' in the config database, which cannot be queried in a transaction.
+sessionDB = session.getDatabase(kConfigDB);
+testSnapshotAggFailsWithCode(
+ "system.sessions", [{$listSessions: {}}], ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandWorked(localColl.insert(localDocsPost, kWCMajority));
- assert.commandWorked(foreignColl.insert(foreignDocsPost, kWCMajority));
- let results =
- new DBCommandCursor(sessionDB, cmdRes, undefined, undefined, NumberLong(txnNumber))
- .toArray();
- assert.eq(results, expectedResults);
- assert.commandWorked(sessionDB.adminCommand(
- {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
- }
+// Test that $currentOp is disallowed with transactions. We have to reassign 'sessionDB' to
+// refer to the admin database, because $currentOp pipelines are required to run against
+// 'admin'. Queries against 'admin' are not permitted in a transaction.
+sessionDB = session.getDatabase(kAdminDB);
+testSnapshotAggFailsWithCode(1, [{$currentOp: {}}], ErrorCodes.OperationNotSupportedInTransaction);
+sessionDB = session.getDatabase(kDBName);
- // Test that snapshot isolation works with $lookup using localField/foreignField syntax.
- testLookupReadConcernSnapshotIsolation({
- localDocsPre: [{_id: 0}, {_id: 1}, {_id: 2}],
- foreignDocsPre: [{_id: 1}],
- localDocsPost: [{_id: 3}],
- foreignDocsPost: [{_id: 2}, {_id: 3}],
- pipeline: [
- {$lookup: {from: "foreign", localField: "_id", foreignField: "_id", as: "as"}},
- {$sort: {_id: 1}}
- ],
- expectedResults: [{_id: 0, as: []}, {_id: 1, as: [{_id: 1}]}, {_id: 2, as: []}]
+// Helper for testing that aggregation stages which involve a local and foreign collection
+// ($lookup and $graphLookup) obey the expected readConcern "snapshot" isolation semantics.
+//
+// Inserts 'localDocsPre' into the 'local' collection and 'foreignDocsPre' into the 'foreign'
+// collection. Then runs the first batch of 'pipeline', before inserting 'localDocsPost' into
+// 'local' and 'foreignDocsPost' into 'foreign'. Iterates the remainder of the aggregation
+// cursor and verifies that the result set matches 'expectedResults'.
+function testLookupReadConcernSnapshotIsolation(
+ {localDocsPre, foreignDocsPre, localDocsPost, foreignDocsPost, pipeline, expectedResults}) {
+ sessionDB.runCommand({drop: "local", writeConcern: {w: "majority"}});
+ sessionDB.runCommand({drop: "foreign", writeConcern: {w: "majority"}});
+ let localColl = sessionDB.local;
+ let foreignColl = sessionDB.foreign;
+ assert.commandWorked(localColl.insert(localDocsPre, kWCMajority));
+ assert.commandWorked(foreignColl.insert(foreignDocsPre, kWCMajority));
+ let cmdRes = sessionDB.runCommand({
+ aggregate: localColl.getName(),
+ pipeline: pipeline,
+ cursor: {batchSize: 0},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(++txnNumber),
+ startTransaction: true,
+ autocommit: false
});
+ assert.commandWorked(cmdRes);
+ assert.neq(0, cmdRes.cursor.id);
+ assert.eq(0, cmdRes.cursor.firstBatch.length);
+
+ assert.commandWorked(localColl.insert(localDocsPost, kWCMajority));
+ assert.commandWorked(foreignColl.insert(foreignDocsPost, kWCMajority));
+ let results =
+ new DBCommandCursor(sessionDB, cmdRes, undefined, undefined, NumberLong(txnNumber))
+ .toArray();
+ assert.eq(results, expectedResults);
+ assert.commandWorked(sessionDB.adminCommand(
+ {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+}
- // Test that snapshot isolation works with $lookup into a nested pipeline.
- testLookupReadConcernSnapshotIsolation({
+// Test that snapshot isolation works with $lookup using localField/foreignField syntax.
+testLookupReadConcernSnapshotIsolation({
+ localDocsPre: [{_id: 0}, {_id: 1}, {_id: 2}],
+ foreignDocsPre: [{_id: 1}],
+ localDocsPost: [{_id: 3}],
+ foreignDocsPost: [{_id: 2}, {_id: 3}],
+ pipeline: [
+ {$lookup: {from: "foreign", localField: "_id", foreignField: "_id", as: "as"}},
+ {$sort: {_id: 1}}
+ ],
+ expectedResults: [{_id: 0, as: []}, {_id: 1, as: [{_id: 1}]}, {_id: 2, as: []}]
+});
+
+// Test that snapshot isolation works with $lookup into a nested pipeline.
+testLookupReadConcernSnapshotIsolation({
localDocsPre: [{_id: 0}, {_id: 1}, {_id: 2}],
foreignDocsPre: [{_id: 1}],
localDocsPost: [{_id: 3}],
@@ -145,8 +145,8 @@
expectedResults: [{_id: 0, as: []}, {_id: 1, as: [{_id: 1}]}, {_id: 2, as: []}]
});
- // Test that snapshot isolation works with $graphLookup.
- testLookupReadConcernSnapshotIsolation({
+// Test that snapshot isolation works with $graphLookup.
+testLookupReadConcernSnapshotIsolation({
localDocsPre: [{_id: 0}, {_id: 1}, {_id: 2}],
foreignDocsPre: [{_id: 1, linkTo: 2}],
localDocsPost: [{_id: 3}],
@@ -167,97 +167,94 @@
[{_id: 0, as: []}, {_id: 1, as: [{_id: 1, linkTo: 2}]}, {_id: 2, as: []}]
});
- // Test that snapshot isolation works for $geoNear. Special care is taken to test snapshot
- // isolation across getMore for $geoNear as it is an initial document source.
- assert.commandWorked(sessionDB.runCommand({drop: kCollName, writeConcern: {w: "majority"}}));
- assert.commandWorked(sessionDB.runCommand({
- createIndexes: kCollName,
- indexes: [{key: {geo: "2dsphere"}, name: "geo_2dsphere"}],
- writeConcern: {w: "majority"}
- }));
+// Test that snapshot isolation works for $geoNear. Special care is taken to test snapshot
+// isolation across getMore for $geoNear as it is an initial document source.
+assert.commandWorked(sessionDB.runCommand({drop: kCollName, writeConcern: {w: "majority"}}));
+assert.commandWorked(sessionDB.runCommand({
+ createIndexes: kCollName,
+ indexes: [{key: {geo: "2dsphere"}, name: "geo_2dsphere"}],
+ writeConcern: {w: "majority"}
+}));
- const coll = sessionDB.getCollection(kCollName);
- let bulk = coll.initializeUnorderedBulkOp();
- const numInitialGeoInsert = 4;
- for (let i = 0; i < numInitialGeoInsert; ++i) {
- bulk.insert({_id: i, geo: {type: "Point", coordinates: [0, 0]}});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
+const coll = sessionDB.getCollection(kCollName);
+let bulk = coll.initializeUnorderedBulkOp();
+const numInitialGeoInsert = 4;
+for (let i = 0; i < numInitialGeoInsert; ++i) {
+ bulk.insert({_id: i, geo: {type: "Point", coordinates: [0, 0]}});
+}
+assert.commandWorked(bulk.execute({w: "majority"}));
- let cmdRes = assert.commandWorked(sessionDB.runCommand({
- aggregate: kCollName,
- pipeline: [{
- $geoNear: {
- spherical: true,
- near: {type: "Point", coordinates: [0, 0]},
- distanceField: "distance"
- }
- }],
- txnNumber: NumberLong(++txnNumber),
- readConcern: {level: "snapshot"},
- autocommit: false,
- startTransaction: true,
- cursor: {batchSize: 0}
- }));
- assert(cmdRes.hasOwnProperty("cursor"));
- const cursorId = cmdRes.cursor.id;
- assert.neq(cursorId, 0);
+let cmdRes = assert.commandWorked(sessionDB.runCommand({
+ aggregate: kCollName,
+ pipeline: [{
+ $geoNear:
+ {spherical: true, near: {type: "Point", coordinates: [0, 0]}, distanceField: "distance"}
+ }],
+ txnNumber: NumberLong(++txnNumber),
+ readConcern: {level: "snapshot"},
+ autocommit: false,
+ startTransaction: true,
+ cursor: {batchSize: 0}
+}));
+assert(cmdRes.hasOwnProperty("cursor"));
+const cursorId = cmdRes.cursor.id;
+assert.neq(cursorId, 0);
- assert.commandWorked(
- coll.insert({_id: numInitialGeoInsert, geo: {type: "Point", coordinates: [0, 0]}},
- {writeConcern: {w: "majority"}}));
+assert.commandWorked(
+ coll.insert({_id: numInitialGeoInsert, geo: {type: "Point", coordinates: [0, 0]}},
+ {writeConcern: {w: "majority"}}));
- cmdRes = assert.commandWorked(sessionDB.runCommand({
- getMore: NumberLong(cursorId),
- collection: kCollName,
- autocommit: false,
- txnNumber: NumberLong(txnNumber)
- }));
- assert.commandWorked(sessionDB.adminCommand(
- {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
- assert(cmdRes.hasOwnProperty("cursor"));
- assert(cmdRes.cursor.hasOwnProperty("nextBatch"));
- assert.eq(cmdRes.cursor.nextBatch.length, numInitialGeoInsert);
+cmdRes = assert.commandWorked(sessionDB.runCommand({
+ getMore: NumberLong(cursorId),
+ collection: kCollName,
+ autocommit: false,
+ txnNumber: NumberLong(txnNumber)
+}));
+assert.commandWorked(sessionDB.adminCommand(
+ {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+assert(cmdRes.hasOwnProperty("cursor"));
+assert(cmdRes.cursor.hasOwnProperty("nextBatch"));
+assert.eq(cmdRes.cursor.nextBatch.length, numInitialGeoInsert);
- // Test that snapshot reads are legal for $facet.
- assert.commandWorked(sessionDB.runCommand({drop: kCollName, writeConcern: {w: "majority"}}));
- assert.commandWorked(coll.insert(
- [
- {group1: 1, group2: 1, val: 1},
- {group1: 1, group2: 2, val: 2},
- {group1: 2, group2: 2, val: 8}
- ],
- kWCMajority));
+// Test that snapshot reads are legal for $facet.
+assert.commandWorked(sessionDB.runCommand({drop: kCollName, writeConcern: {w: "majority"}}));
+assert.commandWorked(coll.insert(
+ [
+ {group1: 1, group2: 1, val: 1},
+ {group1: 1, group2: 2, val: 2},
+ {group1: 2, group2: 2, val: 8}
+ ],
+ kWCMajority));
- cmdRes = sessionDB.runCommand({
- aggregate: kCollName,
- pipeline: [
- {
- $facet: {
- g1: [{$group: {_id: "$group1", sum: {$sum: "$val"}}}, {$sort: {_id: 1}}],
- g2: [{$group: {_id: "$group2", sum: {$sum: "$val"}}}, {$sort: {_id: 1}}]
- }
- },
- {$unwind: "$g1"},
- {$unwind: "$g2"},
- {$sort: {"g1._id": 1, "g2._id": 1}}
- ],
- cursor: {},
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(++txnNumber),
- startTransaction: true,
- autocommit: false
- });
- assert.commandWorked(sessionDB.adminCommand(
- {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
- assert.commandWorked(cmdRes);
- assert.eq(0, cmdRes.cursor.id);
- assert.eq(cmdRes.cursor.firstBatch, [
- {g1: {_id: 1, sum: 3}, g2: {_id: 1, sum: 1}},
- {g1: {_id: 1, sum: 3}, g2: {_id: 2, sum: 10}},
- {g1: {_id: 2, sum: 8}, g2: {_id: 1, sum: 1}},
- {g1: {_id: 2, sum: 8}, g2: {_id: 2, sum: 10}}
- ]);
+cmdRes = sessionDB.runCommand({
+ aggregate: kCollName,
+ pipeline: [
+ {
+ $facet: {
+ g1: [{$group: {_id: "$group1", sum: {$sum: "$val"}}}, {$sort: {_id: 1}}],
+ g2: [{$group: {_id: "$group2", sum: {$sum: "$val"}}}, {$sort: {_id: 1}}]
+ }
+ },
+ {$unwind: "$g1"},
+ {$unwind: "$g2"},
+ {$sort: {"g1._id": 1, "g2._id": 1}}
+ ],
+ cursor: {},
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(++txnNumber),
+ startTransaction: true,
+ autocommit: false
+});
+assert.commandWorked(sessionDB.adminCommand(
+ {commitTransaction: 1, txnNumber: NumberLong(txnNumber), autocommit: false}));
+assert.commandWorked(cmdRes);
+assert.eq(0, cmdRes.cursor.id);
+assert.eq(cmdRes.cursor.firstBatch, [
+ {g1: {_id: 1, sum: 3}, g2: {_id: 1, sum: 1}},
+ {g1: {_id: 1, sum: 3}, g2: {_id: 2, sum: 10}},
+ {g1: {_id: 2, sum: 8}, g2: {_id: 1, sum: 1}},
+ {g1: {_id: 2, sum: 8}, g2: {_id: 2, sum: 10}}
+]);
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js b/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js
index eefbe613f84..3a65ab021b5 100644
--- a/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js
+++ b/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js
@@ -2,108 +2,107 @@
// for the snapshot's point in time.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- const kDbName = "test";
- const kCollName = "coll";
+const kDbName = "test";
+const kCollName = "coll";
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const testDB = rst.getPrimary().getDB(kDbName);
- const adminDB = testDB.getSiblingDB("admin");
- const coll = testDB.getCollection(kCollName);
+const testDB = rst.getPrimary().getDB(kDbName);
+const adminDB = testDB.getSiblingDB("admin");
+const coll = testDB.getCollection(kCollName);
- // Waits for the operation to reach the "hangAfterPreallocateSnapshot" failpoint.
- function waitForOp(curOpFilter) {
- assert.soon(
- function() {
- const res =
- adminDB
- .aggregate([
- {$currentOp: {}},
- {$match: {$and: [curOpFilter, {msg: "hangAfterPreallocateSnapshot"}]}}
- ])
- .toArray();
- if (res.length === 1) {
- return true;
- }
- return false;
- },
- function() {
- return "Failed to find operation in $currentOp output: " +
- tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
- });
- }
+// Waits for the operation to reach the "hangAfterPreallocateSnapshot" failpoint.
+function waitForOp(curOpFilter) {
+ assert.soon(
+ function() {
+ const res =
+ adminDB
+ .aggregate([
+ {$currentOp: {}},
+ {$match: {$and: [curOpFilter, {msg: "hangAfterPreallocateSnapshot"}]}}
+ ])
+ .toArray();
+ if (res.length === 1) {
+ return true;
+ }
+ return false;
+ },
+ function() {
+ return "Failed to find operation in $currentOp output: " +
+ tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
+ });
+}
- function testCommand(cmd, curOpFilter) {
- coll.drop({writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({
- createIndexes: kCollName,
- indexes:
- [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}]
- }));
- assert.commandWorked(coll.insert({x: 1}, {writeConcern: {w: "majority"}}));
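+// Runs 'cmd' with readConcern 'snapshot' in a parallel shell, hangs it at the
+// 'hangAfterPreallocateSnapshot' failpoint, creates an index to move the collection's minimum
+// visible timestamp forward, and expects the command to fail with SnapshotUnavailable.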
+function testCommand(cmd, curOpFilter) {
+ coll.drop({writeConcern: {w: "majority"}});
+ assert.commandWorked(testDB.runCommand({
+ createIndexes: kCollName,
+ indexes: [{key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}]
+ }));
+ assert.commandWorked(coll.insert({x: 1}, {writeConcern: {w: "majority"}}));
- // Start a command with readConcern "snapshot" that hangs after establishing a storage
- // engine transaction.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangAfterPreallocateSnapshot", mode: "alwaysOn"}));
+ // Start a command with readConcern "snapshot" that hangs after establishing a storage
+ // engine transaction.
+ assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: "hangAfterPreallocateSnapshot", mode: "alwaysOn"}));
- const awaitCommand = startParallelShell(
- "const session = db.getMongo().startSession();" +
- "const sessionDb = session.getDatabase('test');" +
- "session.startTransaction({readConcern: {level: 'snapshot'}});" +
- "const res = sessionDb.runCommand(" + tojson(cmd) + ");" +
- "assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);" +
- "assert.eq(res.errorLabels, ['TransientTransactionError']);" +
- "session.endSession();",
- rst.ports[0]);
+ const awaitCommand = startParallelShell(
+ "const session = db.getMongo().startSession();" +
+ "const sessionDb = session.getDatabase('test');" +
+ "session.startTransaction({readConcern: {level: 'snapshot'}});" +
+ "const res = sessionDb.runCommand(" + tojson(cmd) + ");" +
+ "assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);" +
+ "assert.eq(res.errorLabels, ['TransientTransactionError']);" +
+ "session.endSession();",
+ rst.ports[0]);
- waitForOp(curOpFilter);
+ waitForOp(curOpFilter);
- // Create an index on the collection the command was executed against. This will move the
- // collection's minimum visible timestamp to a point later than the point-in-time referenced
- // by the transaction snapshot.
- assert.commandWorked(testDB.runCommand({
- createIndexes: kCollName,
- indexes: [{key: {x: 1}, name: "x_1"}],
- writeConcern: {w: "majority"}
- }));
+ // Create an index on the collection the command was executed against. This will move the
+ // collection's minimum visible timestamp to a point later than the point-in-time referenced
+ // by the transaction snapshot.
+ assert.commandWorked(testDB.runCommand({
+ createIndexes: kCollName,
+ indexes: [{key: {x: 1}, name: "x_1"}],
+ writeConcern: {w: "majority"}
+ }));
- // Disable the hang and check for parallel shell success. Success indicates that the command
- // failed due to collection metadata invalidation.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "hangAfterPreallocateSnapshot", mode: "off"}));
+ // Disable the hang and check for parallel shell success. Success indicates that the command
+ // failed due to collection metadata invalidation.
+ assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "hangAfterPreallocateSnapshot", mode: "off"}));
- awaitCommand();
- }
+ awaitCommand();
+}
- testCommand({aggregate: kCollName, pipeline: [], cursor: {}},
- {"command.aggregate": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({delete: kCollName, deletes: [{q: {x: 1}, limit: 1}]},
- {"command.delete": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({distinct: kCollName, key: "x"},
- {"command.distinct": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({find: kCollName},
- {"command.find": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({findAndModify: kCollName, query: {x: 1}, remove: true}, {
- "command.findAndModify": kCollName,
- "command.remove": true,
- "command.readConcern.level": "snapshot"
- });
- testCommand({findAndModify: kCollName, query: {x: 1}, update: {$set: {x: 2}}}, {
- "command.findAndModify": kCollName,
- "command.update.$set": {x: 2},
- "command.readConcern.level": "snapshot"
- });
- testCommand({geoSearch: kCollName, near: [0, 0], maxDistance: 1, search: {a: 1}},
- {"command.geoSearch": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({insert: kCollName, documents: [{x: 1}]},
- {"command.insert": kCollName, "command.readConcern.level": "snapshot"});
- testCommand({update: kCollName, updates: [{q: {x: 1}, u: {$set: {x: 2}}}]},
- {"command.update": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({aggregate: kCollName, pipeline: [], cursor: {}},
+ {"command.aggregate": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({delete: kCollName, deletes: [{q: {x: 1}, limit: 1}]},
+ {"command.delete": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({distinct: kCollName, key: "x"},
+ {"command.distinct": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({find: kCollName},
+ {"command.find": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({findAndModify: kCollName, query: {x: 1}, remove: true}, {
+ "command.findAndModify": kCollName,
+ "command.remove": true,
+ "command.readConcern.level": "snapshot"
+});
+testCommand({findAndModify: kCollName, query: {x: 1}, update: {$set: {x: 2}}}, {
+ "command.findAndModify": kCollName,
+ "command.update.$set": {x: 2},
+ "command.readConcern.level": "snapshot"
+});
+testCommand({geoSearch: kCollName, near: [0, 0], maxDistance: 1, search: {a: 1}},
+ {"command.geoSearch": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({insert: kCollName, documents: [{x: 1}]},
+ {"command.insert": kCollName, "command.readConcern.level": "snapshot"});
+testCommand({update: kCollName, updates: [{q: {x: 1}, u: {$set: {x: 2}}}]},
+ {"command.update": kCollName, "command.readConcern.level": "snapshot"});
- rst.stopSet();
+rst.stopSet();
})();
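
The hunk above is purely a re-indentation, so the behaviour it exercises is easy to lose in the diff noise. Roughly, the pattern is: hang a snapshot-read-concern command on a failpoint, change the collection's catalog out from under it, then release the hang and expect SnapshotUnavailable. A minimal, self-contained sketch of that pattern follows; the collection name, the simplified fixture setup, and the omission of the error-label checks are illustrative choices, not the test's actual code.

(function() {
    "use strict";

    const rst = new ReplSetTest({nodes: 1});
    rst.startSet();
    rst.initiate();
    const testDB = rst.getPrimary().getDB("test");
    assert.commandWorked(testDB.createCollection("invalidate_me"));

    // Pause commands once they have preallocated their storage snapshot.
    assert.commandWorked(testDB.adminCommand(
        {configureFailPoint: "hangAfterPreallocateSnapshot", mode: "alwaysOn"}));

    // Run a snapshot-read-concern transaction in a parallel shell. It is expected to
    // fail once the collection's catalog metadata changes underneath it.
    const awaitShell = startParallelShell(function() {
        const session = db.getMongo().startSession();
        const sessionDb = session.getDatabase("test");
        session.startTransaction({readConcern: {level: "snapshot"}});
        const res = sessionDb.runCommand({find: "invalidate_me"});
        assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);
        session.endSession();
    }, rst.ports[0]);

    // Wait for the hung find to show up in $currentOp before invalidating it.
    assert.soon(() => testDB.getSiblingDB("admin")
                          .aggregate([{$currentOp: {}}, {$match: {"command.find": "invalidate_me"}}])
                          .itcount() === 1);

    // createIndexes moves the collection's minimum visible timestamp past the
    // transaction's snapshot, so the hung command fails with SnapshotUnavailable.
    assert.commandWorked(testDB.runCommand({
        createIndexes: "invalidate_me",
        indexes: [{key: {x: 1}, name: "x_1"}],
        writeConcern: {w: "majority"}
    }));

    assert.commandWorked(testDB.adminCommand(
        {configureFailPoint: "hangAfterPreallocateSnapshot", mode: "off"}));
    awaitShell();
    rst.stopSet();
})();
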
diff --git a/jstests/noPassthrough/read_concern_snapshot_yielding.js b/jstests/noPassthrough/read_concern_snapshot_yielding.js
index 063e175030a..a5a2605cbae 100644
--- a/jstests/noPassthrough/read_concern_snapshot_yielding.js
+++ b/jstests/noPassthrough/read_concern_snapshot_yielding.js
@@ -3,152 +3,180 @@
// storage engine resources.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
+"use strict";
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+const dbName = "test";
+const collName = "coll";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const db = rst.getPrimary().getDB(dbName);
+const adminDB = db.getSiblingDB("admin");
+const coll = db.coll;
+TestData.numDocs = 4;
+
+// Set 'internalQueryExecYieldIterations' to 2 to ensure that commands yield on the second try
+// (i.e. after they have established a snapshot but before they have returned any documents).
+assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 2}));
+
+function waitForOpId(curOpFilter) {
+ let opId;
+ // Wait until we know the failpoint 'setInterruptOnlyPlansCheckForInterruptHang' has been
+ // reached.
+ assert.soon(
+ function() {
+ const res = adminDB
+ .aggregate([
+ {$currentOp: {}},
+ {
+ $match: {
+ $and: [
+ {ns: coll.getFullName()},
+ curOpFilter,
+ {"msg": "setInterruptOnlyPlansCheckForInterruptHang"}
+ ]
+ }
+ }
+ ])
+ .toArray();
+
+ if (res.length === 1) {
+ opId = res[0].opid;
+ return true;
+ }
+ return false;
+ },
+ function() {
+ return "Failed to find operation in $currentOp output: " +
+ tojson(adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName()}}])
+ .toArray());
+ });
+ return opId;
+}
+
+function assertKillPending(opId) {
+ const res =
+ adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName(), opid: opId}}])
+ .toArray();
+ assert.eq(
+ res.length,
+ 1,
+ tojson(
+ adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName()}}]).toArray()));
+ assert(res[0].hasOwnProperty("killPending"), tojson(res));
+ assert.eq(true, res[0].killPending, tojson(res));
+}
+
+function populateCollection() {
+ db.coll.drop({writeConcern: {w: "majority"}});
+ for (let i = 0; i < TestData.numDocs; i++) {
+ assert.commandWorked(
+ db.coll.insert({_id: i, x: 1, location: [0, 0]}, {writeConcern: {w: "majority"}}));
}
- const dbName = "test";
- const collName = "coll";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const db = rst.getPrimary().getDB(dbName);
- const adminDB = db.getSiblingDB("admin");
- const coll = db.coll;
- TestData.numDocs = 4;
-
- // Set 'internalQueryExecYieldIterations' to 2 to ensure that commands yield on the second try
- // (i.e. after they have established a snapshot but before they have returned any documents).
- assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 2}));
-
- function waitForOpId(curOpFilter) {
- let opId;
- // Wait until we know the failpoint 'setInterruptOnlyPlansCheckForInterruptHang' has been
- // reached.
- assert.soon(
- function() {
- const res =
- adminDB
- .aggregate([
- {$currentOp: {}},
- {
- $match: {
- $and: [
- {ns: coll.getFullName()},
- curOpFilter,
- {"msg": "setInterruptOnlyPlansCheckForInterruptHang"}
- ]
- }
- }
- ])
- .toArray();
-
- if (res.length === 1) {
- opId = res[0].opid;
- return true;
- }
- return false;
- },
- function() {
- return "Failed to find operation in $currentOp output: " +
- tojson(adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName()}}])
- .toArray());
- });
- return opId;
- }
-
- function assertKillPending(opId) {
- const res =
- adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName(), opid: opId}}])
- .toArray();
- assert.eq(res.length,
- 1,
- tojson(adminDB.aggregate([{$currentOp: {}}, {$match: {ns: coll.getFullName()}}])
- .toArray()));
- assert(res[0].hasOwnProperty("killPending"), tojson(res));
- assert.eq(true, res[0].killPending, tojson(res));
- }
-
- function populateCollection() {
- db.coll.drop({writeConcern: {w: "majority"}});
- for (let i = 0; i < TestData.numDocs; i++) {
- assert.commandWorked(
- db.coll.insert({_id: i, x: 1, location: [0, 0]}, {writeConcern: {w: "majority"}}));
- }
-
- assert.commandWorked(db.runCommand({
- createIndexes: "coll",
- indexes: [{key: {location: "2d"}, name: "geo_2d"}],
- writeConcern: {w: "majority"}
- }));
- }
-
- function testCommand(awaitCommandFn, curOpFilter, testWriteConflict) {
- //
- // Test that the command can be killed.
- //
-
+ assert.commandWorked(db.runCommand({
+ createIndexes: "coll",
+ indexes: [{key: {location: "2d"}, name: "geo_2d"}],
+ writeConcern: {w: "majority"}
+ }));
+}
+
+function testCommand(awaitCommandFn, curOpFilter, testWriteConflict) {
+ //
+ // Test that the command can be killed.
+ //
+
+ TestData.txnNumber++;
+ populateCollection();
+
+ // Start a command that hangs before checking for interrupt.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
+ let awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
+
+ // Kill the command, and check that it is set to killPending.
+ let opId = waitForOpId(curOpFilter);
+ assert.commandWorked(db.killOp(opId));
+ assertKillPending(opId);
+
+ // Remove the hang, and check that the command is killed.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
+ let exitCode = awaitCommand({checkExitSuccess: false});
+ assert.neq(0, exitCode, "Expected shell to exit with failure due to operation kill");
+
+ //
+ // Test that the command does not yield locks.
+ //
+
+ TestData.txnNumber++;
+ populateCollection();
+
+ // Start a command that hangs before checking for interrupt.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
+ awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
+ waitForOpId(curOpFilter);
+
+ // Start a drop. This should block behind the command, since the command does not yield
+ // locks.
+ let awaitDrop = startParallelShell(function() {
+ db.getSiblingDB("test").coll.drop({writeConcern: {w: "majority"}});
+ }, rst.ports[0]);
+
+ // Remove the hang. The command should complete successfully.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
+ awaitCommand();
+
+ // Now the drop can complete.
+ awaitDrop();
+
+ //
+ // Test that the command does not read data that is inserted during its execution.
+ // 'awaitCommandFn' should fail if it reads the following document:
+ // {_id: <numDocs>, x: 1, new: 1, location: [0, 0]}
+ //
+
+ TestData.txnNumber++;
+ populateCollection();
+
+ // Start a command that hangs before checking for interrupt.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
+ awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
+ waitForOpId(curOpFilter);
+
+ // Insert data that should not be read by the command.
+ assert.commandWorked(db.coll.insert({_id: TestData.numDocs, x: 1, new: 1, location: [0, 0]},
+ {writeConcern: {w: "majority"}}));
+
+ // Remove the hang. The command should complete successfully.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
+ awaitCommand();
+
+ //
+ // Test that the command fails if a write conflict occurs. 'awaitCommandFn' should write to
+ // the following document: {_id: <numDocs>, x: 1, new: 1, location: [0, 0]}
+ //
+
+ if (testWriteConflict) {
TestData.txnNumber++;
populateCollection();
- // Start a command that hangs before checking for interrupt.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
- let awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
-
- // Kill the command, and check that it is set to killPending.
- let opId = waitForOpId(curOpFilter);
- assert.commandWorked(db.killOp(opId));
- assertKillPending(opId);
-
- // Remove the hang, and check that the command is killed.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- let exitCode = awaitCommand({checkExitSuccess: false});
- assert.neq(0, exitCode, "Expected shell to exit with failure due to operation kill");
-
- //
- // Test that the command does not yield locks.
- //
-
- TestData.txnNumber++;
- populateCollection();
-
- // Start a command that hangs before checking for interrupt.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
- awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
- waitForOpId(curOpFilter);
-
- // Start a drop. This should block behind the command, since the command does not yield
- // locks.
- let awaitDrop = startParallelShell(function() {
- db.getSiblingDB("test").coll.drop({writeConcern: {w: "majority"}});
- }, rst.ports[0]);
-
- // Remove the hang. The command should complete successfully.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- awaitCommand();
-
- // Now the drop can complete.
- awaitDrop();
-
- //
- // Test that the command does not read data that is inserted during its execution.
- // 'awaitCommandFn' should fail if it reads the following document:
- // {_id: <numDocs>, x: 1, new: 1, location: [0, 0]}
- //
-
- TestData.txnNumber++;
- populateCollection();
+ // Insert the document that the command will write to.
+ assert.commandWorked(db.coll.insert({_id: TestData.numDocs, x: 1, new: 1, location: [0, 0]},
+ {writeConcern: {w: "majority"}}));
// Start a command that hangs before checking for interrupt.
assert.commandWorked(db.adminCommand(
@@ -156,177 +184,142 @@
awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
waitForOpId(curOpFilter);
- // Insert data that should not be read by the command.
- assert.commandWorked(db.coll.insert({_id: TestData.numDocs, x: 1, new: 1, location: [0, 0]},
- {writeConcern: {w: "majority"}}));
+ // Update the document that the command will write to.
+ assert.commandWorked(db.coll.update(
+ {_id: TestData.numDocs}, {$set: {conflict: true}}, {writeConcern: {w: "majority"}}));
- // Remove the hang. The command should complete successfully.
+ // Remove the hang. The command should fail.
assert.commandWorked(db.adminCommand(
{configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- awaitCommand();
-
- //
- // Test that the command fails if a write conflict occurs. 'awaitCommandFn' should write to
- // the following document: {_id: <numDocs>, x: 1, new: 1, location: [0, 0]}
- //
-
- if (testWriteConflict) {
- TestData.txnNumber++;
- populateCollection();
-
- // Insert the document that the command will write to.
- assert.commandWorked(
- db.coll.insert({_id: TestData.numDocs, x: 1, new: 1, location: [0, 0]},
- {writeConcern: {w: "majority"}}));
-
- // Start a command that hangs before checking for interrupt.
- assert.commandWorked(db.adminCommand({
- configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang",
- mode: "alwaysOn"
- }));
- awaitCommand = startParallelShell(awaitCommandFn, rst.ports[0]);
- waitForOpId(curOpFilter);
-
- // Update the document that the command will write to.
- assert.commandWorked(db.coll.update({_id: TestData.numDocs},
- {$set: {conflict: true}},
- {writeConcern: {w: "majority"}}));
-
- // Remove the hang. The command should fail.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- exitCode = awaitCommand({checkExitSuccess: false});
- assert.neq(0, exitCode, "Expected shell to exit with failure due to WriteConflict");
- }
+ exitCode = awaitCommand({checkExitSuccess: false});
+ assert.neq(0, exitCode, "Expected shell to exit with failure due to WriteConflict");
}
-
- // Test find.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand({find: "coll", filter: {x: 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(res.cursor.firstBatch.length, TestData.numDocs, tojson(res));
- }, {"command.filter": {x: 1}});
-
- // Test getMore on a find established cursor.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- const initialFindBatchSize = 2;
- const cursorId = assert
- .commandWorked(sessionDb.runCommand(
- {find: "coll", filter: {x: 1}, batchSize: initialFindBatchSize}))
- .cursor.id;
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
- const res = assert.commandWorked(sessionDb.runCommand(
- {getMore: NumberLong(cursorId), collection: "coll", batchSize: TestData.numDocs}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(
- res.cursor.nextBatch.length, TestData.numDocs - initialFindBatchSize, tojson(res));
- }, {"cursor.originatingCommand.filter": {x: 1}});
-
- // Test aggregate.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(
- sessionDb.runCommand({aggregate: "coll", pipeline: [{$match: {x: 1}}], cursor: {}}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(res.cursor.firstBatch.length, TestData.numDocs, tojson(res));
- }, {"command.pipeline": [{$match: {x: 1}}]});
-
- // Test getMore with an initial find batchSize of 0. Interrupt behavior of a getMore is not
- // expected to change with a change of batchSize in the originating command.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
- const initialFindBatchSize = 0;
- const cursorId = assert
- .commandWorked(sessionDb.runCommand(
- {find: "coll", filter: {x: 1}, batchSize: initialFindBatchSize}))
- .cursor.id;
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
- const res = assert.commandWorked(
- sessionDb.runCommand({getMore: NumberLong(cursorId), collection: "coll"}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(
- res.cursor.nextBatch.length, TestData.numDocs - initialFindBatchSize, tojson(res));
- }, {"cursor.originatingCommand.filter": {x: 1}});
-
- // Test distinct.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand({distinct: "coll", key: "_id"}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert(res.hasOwnProperty("values"));
- assert.eq(res.values.length, 4, tojson(res));
- }, {"command.distinct": "coll"});
-
- // Test update.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand({
- update: "coll",
- updates:
- [{q: {}, u: {$set: {updated: true}}}, {q: {new: 1}, u: {$set: {updated: true}}}]
- }));
- assert.commandWorked(session.commitTransaction_forTesting());
- // Only update one existing doc committed before the transaction.
- assert.eq(res.n, 1, tojson(res));
- assert.eq(res.nModified, 1, tojson(res));
- }, {op: "update"}, true);
-
- // Test delete.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand(
- {delete: "coll", deletes: [{q: {}, limit: 1}, {q: {new: 1}, limit: 1}]}));
- assert.commandWorked(session.commitTransaction_forTesting());
- // Only remove one existing doc committed before the transaction.
- assert.eq(res.n, 1, tojson(res));
- }, {op: "remove"}, true);
-
- // Test findAndModify.
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand(
- {findAndModify: "coll", query: {new: 1}, update: {$set: {findAndModify: 1}}}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert(res.hasOwnProperty("lastErrorObject"));
- assert.eq(res.lastErrorObject.n, 0, tojson(res));
- assert.eq(res.lastErrorObject.updatedExisting, false, tojson(res));
- }, {"command.findAndModify": "coll"}, true);
-
- testCommand(function() {
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
- session.startTransaction({readConcern: {level: "snapshot"}});
- const res = assert.commandWorked(sessionDb.runCommand(
- {findAndModify: "coll", query: {new: 1}, update: {$set: {findAndModify: 1}}}));
- assert.commandWorked(session.commitTransaction_forTesting());
- assert(res.hasOwnProperty("lastErrorObject"));
- assert.eq(res.lastErrorObject.n, 0, tojson(res));
- assert.eq(res.lastErrorObject.updatedExisting, false, tojson(res));
- }, {"command.findAndModify": "coll"}, true);
-
- rst.stopSet();
+}
+
+// Test find.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand({find: "coll", filter: {x: 1}}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert.eq(res.cursor.firstBatch.length, TestData.numDocs, tojson(res));
+}, {"command.filter": {x: 1}});
+
+// Test getMore on a find established cursor.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
+ const initialFindBatchSize = 2;
+ const cursorId = assert
+ .commandWorked(sessionDb.runCommand(
+ {find: "coll", filter: {x: 1}, batchSize: initialFindBatchSize}))
+ .cursor.id;
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
+ const res = assert.commandWorked(sessionDb.runCommand(
+ {getMore: NumberLong(cursorId), collection: "coll", batchSize: TestData.numDocs}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert.eq(res.cursor.nextBatch.length, TestData.numDocs - initialFindBatchSize, tojson(res));
+}, {"cursor.originatingCommand.filter": {x: 1}});
+
+// Test aggregate.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(
+ sessionDb.runCommand({aggregate: "coll", pipeline: [{$match: {x: 1}}], cursor: {}}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert.eq(res.cursor.firstBatch.length, TestData.numDocs, tojson(res));
+}, {"command.pipeline": [{$match: {x: 1}}]});
+
+// Test getMore with an initial find batchSize of 0. Interrupt behavior of a getMore is not
+// expected to change with a change of batchSize in the originating command.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
+ const initialFindBatchSize = 0;
+ const cursorId = assert
+ .commandWorked(sessionDb.runCommand(
+ {find: "coll", filter: {x: 1}, batchSize: initialFindBatchSize}))
+ .cursor.id;
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));
+ const res = assert.commandWorked(
+ sessionDb.runCommand({getMore: NumberLong(cursorId), collection: "coll"}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert.eq(res.cursor.nextBatch.length, TestData.numDocs - initialFindBatchSize, tojson(res));
+}, {"cursor.originatingCommand.filter": {x: 1}});
+
+// Test distinct.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand({distinct: "coll", key: "_id"}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert(res.hasOwnProperty("values"));
+ assert.eq(res.values.length, 4, tojson(res));
+}, {"command.distinct": "coll"});
+
+// Test update.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand({
+ update: "coll",
+ updates: [{q: {}, u: {$set: {updated: true}}}, {q: {new: 1}, u: {$set: {updated: true}}}]
+ }));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ // Only update one existing doc committed before the transaction.
+ assert.eq(res.n, 1, tojson(res));
+ assert.eq(res.nModified, 1, tojson(res));
+}, {op: "update"}, true);
+
+// Test delete.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand(
+ {delete: "coll", deletes: [{q: {}, limit: 1}, {q: {new: 1}, limit: 1}]}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ // Only remove one existing doc committed before the transaction.
+ assert.eq(res.n, 1, tojson(res));
+}, {op: "remove"}, true);
+
+// Test findAndModify.
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand(
+ {findAndModify: "coll", query: {new: 1}, update: {$set: {findAndModify: 1}}}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert(res.hasOwnProperty("lastErrorObject"));
+ assert.eq(res.lastErrorObject.n, 0, tojson(res));
+ assert.eq(res.lastErrorObject.updatedExisting, false, tojson(res));
+}, {"command.findAndModify": "coll"}, true);
+
+testCommand(function() {
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase("test");
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ const res = assert.commandWorked(sessionDb.runCommand(
+ {findAndModify: "coll", query: {new: 1}, update: {$set: {findAndModify: 1}}}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ assert(res.hasOwnProperty("lastErrorObject"));
+ assert.eq(res.lastErrorObject.n, 0, tojson(res));
+ assert.eq(res.lastErrorObject.updatedExisting, false, tojson(res));
+}, {"command.findAndModify": "coll"}, true);
+
+rst.stopSet();
}());
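
The re-indented yielding test above relies on a three-step kill pattern that is worth spelling out: hang an interrupt-only plan on a failpoint, locate it through $currentOp, and kill it before releasing the hang. The sketch below is a reduced version of that flow; it assumes a running one-node replica set `rst`, a `db` handle on its primary, and a test.coll containing a few matching documents (all illustrative names, not the test's helpers).

// Force an interrupt check after two work cycles so the failpoint is reached
// quickly even on a tiny collection.
assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 2}));
assert.commandWorked(db.adminCommand(
    {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "alwaysOn"}));

// Run a snapshot-read-concern find in a parallel shell; it is expected to be
// interrupted by the killOp below, so commandFailed should pass.
const awaitFind = startParallelShell(function() {
    const session = db.getMongo().startSession({causalConsistency: false});
    const sessionDb = session.getDatabase("test");
    session.startTransaction({readConcern: {level: "snapshot"}});
    assert.commandFailed(sessionDb.runCommand({find: "coll", filter: {x: 1}}));
    session.endSession();
}, rst.ports[0]);

// Locate the hung operation via $currentOp and request that it be killed.
let opId;
assert.soon(function() {
    const ops = db.getSiblingDB("admin")
                    .aggregate([
                        {$currentOp: {}},
                        {$match: {ns: "test.coll", msg: "setInterruptOnlyPlansCheckForInterruptHang"}}
                    ])
                    .toArray();
    if (ops.length === 1) {
        opId = ops[0].opid;
        return true;
    }
    return false;
});
assert.commandWorked(db.killOp(opId));

// Releasing the hang lets the interrupt check run, so the find returns an error.
assert.commandWorked(db.adminCommand(
    {configureFailPoint: "setInterruptOnlyPlansCheckForInterruptHang", mode: "off"}));
awaitFind();
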
diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js
index debdec99faf..3e03b8124ae 100644
--- a/jstests/noPassthrough/read_majority.js
+++ b/jstests/noPassthrough/read_majority.js
@@ -17,211 +17,209 @@
load("jstests/libs/analyze_plan.js");
(function() {
- "use strict";
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
+"use strict";
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+// Tests the functionality for committed reads for the given read concern level.
+function testReadConcernLevel(level) {
+ var replTest = new ReplSetTest({
+ nodes: 1,
+ oplogSize: 2,
+ nodeOptions:
+ {setParameter: 'testingSnapshotBehaviorInIsolation=true', enableMajorityReadConcern: ''}
+ });
+ replTest.startSet();
+ // Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation'
+ // set.
+ replTest.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+
+ const session =
+ replTest.getPrimary().getDB("test").getMongo().startSession({causalConsistency: false});
+ const db = session.getDatabase("test");
+ const t = db.coll;
+
+ function assertNoSnapshotAvailableForReadConcernLevel() {
+ var res =
+ t.runCommand('find', {batchSize: 2, readConcern: {level: level}, maxTimeMS: 1000});
+ assert.commandFailed(res);
+ assert.eq(res.code, ErrorCodes.MaxTimeMSExpired);
}
- // Tests the functionality for committed reads for the given read concern level.
- function testReadConcernLevel(level) {
- var replTest = new ReplSetTest({
- nodes: 1,
- oplogSize: 2,
- nodeOptions: {
- setParameter: 'testingSnapshotBehaviorInIsolation=true',
- enableMajorityReadConcern: ''
- }
- });
- replTest.startSet();
- // Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation'
- // set.
- replTest.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
-
- const session =
- replTest.getPrimary().getDB("test").getMongo().startSession({causalConsistency: false});
- const db = session.getDatabase("test");
- const t = db.coll;
-
- function assertNoSnapshotAvailableForReadConcernLevel() {
- var res =
- t.runCommand('find', {batchSize: 2, readConcern: {level: level}, maxTimeMS: 1000});
- assert.commandFailed(res);
- assert.eq(res.code, ErrorCodes.MaxTimeMSExpired);
- }
-
- function getCursorForReadConcernLevel() {
- var res = t.runCommand('find', {batchSize: 2, readConcern: {level: level}});
- assert.commandWorked(res);
- return new DBCommandCursor(db, res, 2, undefined);
- }
+ function getCursorForReadConcernLevel() {
+ var res = t.runCommand('find', {batchSize: 2, readConcern: {level: level}});
+ assert.commandWorked(res);
+ return new DBCommandCursor(db, res, 2, undefined);
+ }
- function getAggCursorForReadConcernLevel() {
- var res = t.runCommand(
- 'aggregate', {pipeline: [], cursor: {batchSize: 2}, readConcern: {level: level}});
- assert.commandWorked(res);
- return new DBCommandCursor(db, res, 2, undefined);
- }
+ function getAggCursorForReadConcernLevel() {
+ var res = t.runCommand('aggregate',
+ {pipeline: [], cursor: {batchSize: 2}, readConcern: {level: level}});
+ assert.commandWorked(res);
+ return new DBCommandCursor(db, res, 2, undefined);
+ }
- function getExplainPlan(query) {
- var res = db.runCommand({explain: {find: t.getName(), filter: query}});
- return assert.commandWorked(res).queryPlanner.winningPlan;
- }
+ function getExplainPlan(query) {
+ var res = db.runCommand({explain: {find: t.getName(), filter: query}});
+ return assert.commandWorked(res).queryPlanner.winningPlan;
+ }
- //
- // Actual Test
- //
-
- // Ensure killOp will work on an op that is waiting for snapshots to be created
- var blockedReader = startParallelShell(
- "const session = db.getMongo().startSession({causalConsistency: false}); " +
- "const sessionDB = session.getDatabase(db.getName()); " +
- "sessionDB.coll.runCommand('find', {batchSize: 2, readConcern: {level: \"" + level +
- "\"}});",
- replTest.ports[0]);
-
- assert.soon(function() {
- var curOps = db.currentOp(true);
- jsTestLog("curOp output: " + tojson(curOps));
- for (var i in curOps.inprog) {
- var op = curOps.inprog[i];
- if (op.op === 'query' && op.ns === "test.$cmd" && op.command.find === 'coll') {
- db.killOp(op.opid);
- return true;
- }
+ //
+ // Actual Test
+ //
+
+ // Ensure killOp will work on an op that is waiting for snapshots to be created
+ var blockedReader = startParallelShell(
+ "const session = db.getMongo().startSession({causalConsistency: false}); " +
+ "const sessionDB = session.getDatabase(db.getName()); " +
+ "sessionDB.coll.runCommand('find', {batchSize: 2, readConcern: {level: \"" + level +
+ "\"}});",
+ replTest.ports[0]);
+
+ assert.soon(function() {
+ var curOps = db.currentOp(true);
+ jsTestLog("curOp output: " + tojson(curOps));
+ for (var i in curOps.inprog) {
+ var op = curOps.inprog[i];
+ if (op.op === 'query' && op.ns === "test.$cmd" && op.command.find === 'coll') {
+ db.killOp(op.opid);
+ return true;
}
- return false;
- }, "could not kill an op that was waiting for a snapshot", 60 * 1000);
- blockedReader();
-
- var snapshot1 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- assert.commandWorked(db.runCommand({create: "coll"}));
- var snapshot2 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-
- for (var i = 0; i < 10; i++) {
- assert.writeOK(t.insert({_id: i, version: 3}));
}
+ return false;
+ }, "could not kill an op that was waiting for a snapshot", 60 * 1000);
+ blockedReader();
- assertNoSnapshotAvailableForReadConcernLevel();
-
- var snapshot3 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-
- assertNoSnapshotAvailableForReadConcernLevel();
-
- assert.writeOK(t.update({}, {$set: {version: 4}}, false, true));
- var snapshot4 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-
- // Collection didn't exist in snapshot 1.
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot1}));
- assertNoSnapshotAvailableForReadConcernLevel();
-
- // Collection existed but was empty in snapshot 2.
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot2}));
- assert.eq(getCursorForReadConcernLevel().itcount(), 0);
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 0);
-
- // In snapshot 3 the collection was filled with {version: 3} documents.
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot3}));
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 10);
- getAggCursorForReadConcernLevel().forEach(function(doc) {
- // Note: agg uses internal batching, so we can't reliably test flipping the snapshot.
- // However, it uses the same mechanism as find, so if one works, both should.
- assert.eq(doc.version, 3);
- });
-
- assert.eq(getCursorForReadConcernLevel().itcount(), 10);
- var cursor = getCursorForReadConcernLevel(); // Note: uses batchsize=2.
- assert.eq(cursor.next().version, 3);
- assert.eq(cursor.next().version, 3);
- assert(!cursor.objsLeftInBatch());
-
- // In snapshot 4 the collection was filled with {version: 4} documents.
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot4}));
-
- // This triggers a getMore which sees the new version.
- assert.eq(cursor.next().version, 4);
- assert.eq(cursor.next().version, 4);
-
- // Adding an index bumps the min snapshot for a collection as of SERVER-20260. This may
- // change to just filter that index out from query planning as part of SERVER-20439.
- t.ensureIndex({version: 1});
- assertNoSnapshotAvailableForReadConcernLevel();
-
- // To use the index, a snapshot created after the index was completed must be marked
- // committed.
- var newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- assertNoSnapshotAvailableForReadConcernLevel();
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
- assert.eq(getCursorForReadConcernLevel().itcount(), 10);
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 10);
- assert(isIxscan(db, getExplainPlan({version: 1})));
-
- // Dropping an index does bump the min snapshot.
- t.dropIndex({version: 1});
- assertNoSnapshotAvailableForReadConcernLevel();
-
- // To use the collection again, a snapshot created after the dropIndex must be marked
- // committed.
- newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- assertNoSnapshotAvailableForReadConcernLevel();
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
- assert.eq(getCursorForReadConcernLevel().itcount(), 10);
-
- // Reindex bumps the min snapshot.
- assert.writeOK(t.bump.insert({a: 1})); // Bump timestamp.
- t.reIndex();
- assertNoSnapshotAvailableForReadConcernLevel();
- newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- assertNoSnapshotAvailableForReadConcernLevel();
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
- assert.eq(getCursorForReadConcernLevel().itcount(), 10);
-
- // Dropping the collection is visible in the committed snapshot, even though it hasn't been
- // marked committed yet. This is allowed by the current specification even though it
- // violates strict read-committed semantics since we don't guarantee them on metadata
- // operations.
- t.drop();
- assert.eq(getCursorForReadConcernLevel().itcount(), 0);
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 0);
-
- // Creating a new collection with the same name hides the collection until that operation is
- // in the committed view.
- t.insert({_id: 0, version: 8});
- assertNoSnapshotAvailableForReadConcernLevel();
- newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- assertNoSnapshotAvailableForReadConcernLevel();
- assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
- assert.eq(getCursorForReadConcernLevel().itcount(), 1);
- assert.eq(getAggCursorForReadConcernLevel().itcount(), 1);
-
- // Commands that only support read concern 'local' (such as ping) must work when it is
- // explicitly specified and fail for majority-committed read concern levels.
- assert.commandWorked(db.adminCommand({ping: 1, readConcern: {level: 'local'}}));
- var res = assert.commandFailed(db.adminCommand({ping: 1, readConcern: {level: level}}));
- assert.eq(res.code, ErrorCodes.InvalidOptions);
-
- // Agg $out supports majority committed reads.
- assert.commandWorked(t.runCommand(
- 'aggregate', {pipeline: [{$out: 'out'}], cursor: {}, readConcern: {level: 'local'}}));
- assert.commandWorked(t.runCommand(
- 'aggregate', {pipeline: [{$out: 'out'}], cursor: {}, readConcern: {level: level}}));
-
- replTest.stopSet();
- }
-
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- const db = conn.getDB("test");
- const supportsCommittedReads =
- assert.commandWorked(db.serverStatus()).storageEngine.supportsCommittedReads;
- MongoRunner.stopMongod(conn);
+ var snapshot1 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assert.commandWorked(db.runCommand({create: "coll"}));
+ var snapshot2 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
- if (supportsCommittedReads) {
- testReadConcernLevel("majority");
+ for (var i = 0; i < 10; i++) {
+ assert.writeOK(t.insert({_id: i, version: 3}));
}
+
+ assertNoSnapshotAvailableForReadConcernLevel();
+
+ var snapshot3 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+
+ assertNoSnapshotAvailableForReadConcernLevel();
+
+ assert.writeOK(t.update({}, {$set: {version: 4}}, false, true));
+ var snapshot4 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+
+ // Collection didn't exist in snapshot 1.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot1}));
+ assertNoSnapshotAvailableForReadConcernLevel();
+
+ // Collection existed but was empty in snapshot 2.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot2}));
+ assert.eq(getCursorForReadConcernLevel().itcount(), 0);
+ assert.eq(getAggCursorForReadConcernLevel().itcount(), 0);
+
+ // In snapshot 3 the collection was filled with {version: 3} documents.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot3}));
+ assert.eq(getAggCursorForReadConcernLevel().itcount(), 10);
+ getAggCursorForReadConcernLevel().forEach(function(doc) {
+ // Note: agg uses internal batching, so we can't reliably test flipping the snapshot.
+ // However, it uses the same mechanism as find, so if one works, both should.
+ assert.eq(doc.version, 3);
+ });
+
+ assert.eq(getCursorForReadConcernLevel().itcount(), 10);
+ var cursor = getCursorForReadConcernLevel(); // Note: uses batchsize=2.
+ assert.eq(cursor.next().version, 3);
+ assert.eq(cursor.next().version, 3);
+ assert(!cursor.objsLeftInBatch());
+
+ // In snapshot 4 the collection was filled with {version: 4} documents.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot4}));
+
+ // This triggers a getMore which sees the new version.
+ assert.eq(cursor.next().version, 4);
+ assert.eq(cursor.next().version, 4);
+
+ // Adding an index bumps the min snapshot for a collection as of SERVER-20260. This may
+ // change to just filter that index out from query planning as part of SERVER-20439.
+ t.ensureIndex({version: 1});
+ assertNoSnapshotAvailableForReadConcernLevel();
+
+ // To use the index, a snapshot created after the index was completed must be marked
+ // committed.
+ var newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoSnapshotAvailableForReadConcernLevel();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getCursorForReadConcernLevel().itcount(), 10);
+ assert.eq(getAggCursorForReadConcernLevel().itcount(), 10);
+ assert(isIxscan(db, getExplainPlan({version: 1})));
+
+ // Dropping an index does bump the min snapshot.
+ t.dropIndex({version: 1});
+ assertNoSnapshotAvailableForReadConcernLevel();
+
+ // To use the collection again, a snapshot created after the dropIndex must be marked
+ // committed.
+ newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoSnapshotAvailableForReadConcernLevel();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getCursorForReadConcernLevel().itcount(), 10);
+
+ // Reindex bumps the min snapshot.
+ assert.writeOK(t.bump.insert({a: 1})); // Bump timestamp.
+ t.reIndex();
+ assertNoSnapshotAvailableForReadConcernLevel();
+ newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoSnapshotAvailableForReadConcernLevel();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getCursorForReadConcernLevel().itcount(), 10);
+
+ // Dropping the collection is visible in the committed snapshot, even though it hasn't been
+ // marked committed yet. This is allowed by the current specification even though it
+ // violates strict read-committed semantics since we don't guarantee them on metadata
+ // operations.
+ t.drop();
+ assert.eq(getCursorForReadConcernLevel().itcount(), 0);
+ assert.eq(getAggCursorForReadConcernLevel().itcount(), 0);
+
+ // Creating a new collection with the same name hides the collection until that operation is
+ // in the committed view.
+ t.insert({_id: 0, version: 8});
+ assertNoSnapshotAvailableForReadConcernLevel();
+ newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoSnapshotAvailableForReadConcernLevel();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getCursorForReadConcernLevel().itcount(), 1);
+ assert.eq(getAggCursorForReadConcernLevel().itcount(), 1);
+
+ // Commands that only support read concern 'local' (such as ping) must work when it is
+ // explicitly specified and fail for majority-committed read concern levels.
+ assert.commandWorked(db.adminCommand({ping: 1, readConcern: {level: 'local'}}));
+ var res = assert.commandFailed(db.adminCommand({ping: 1, readConcern: {level: level}}));
+ assert.eq(res.code, ErrorCodes.InvalidOptions);
+
+ // Agg $out supports majority committed reads.
+ assert.commandWorked(t.runCommand(
+ 'aggregate', {pipeline: [{$out: 'out'}], cursor: {}, readConcern: {level: 'local'}}));
+ assert.commandWorked(t.runCommand(
+ 'aggregate', {pipeline: [{$out: 'out'}], cursor: {}, readConcern: {level: level}}));
+
+ replTest.stopSet();
+}
+
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
+const db = conn.getDB("test");
+const supportsCommittedReads =
+ assert.commandWorked(db.serverStatus()).storageEngine.supportsCommittedReads;
+MongoRunner.stopMongod(conn);
+
+if (supportsCommittedReads) {
+ testReadConcernLevel("majority");
+}
}());
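
For orientation, the committed-snapshot machinery that read_majority.js drives above can be reduced to the sketch below. The 'testingSnapshotBehaviorInIsolation' parameter makes majority reads see only snapshots that the test marks committed by hand. The collection name and the single failure/success pair are illustrative; the real test covers many more transitions (index builds, drops, recreation, and so on).

(function() {
    "use strict";

    const replTest = new ReplSetTest({
        nodes: 1,
        nodeOptions: {
            setParameter: "testingSnapshotBehaviorInIsolation=true",
            enableMajorityReadConcern: ""
        }
    });
    replTest.startSet();
    replTest.initiateWithAnyNodeAsPrimary(
        null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
    const db = replTest.getPrimary().getDB("test");

    assert.commandWorked(db.createCollection("snap"));
    assert.writeOK(db.snap.insert({_id: 1}));

    // No snapshot has been marked committed yet, so a majority read cannot find
    // one and times out.
    const res = db.snap.runCommand(
        "find", {readConcern: {level: "majority"}, maxTimeMS: 1000});
    assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);

    // Take a snapshot and mark it committed; the majority read now sees the insert.
    const snapName = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
    assert.commandWorked(db.adminCommand({setCommittedSnapshot: snapName}));
    assert.eq(1, db.snap.find().readConcern("majority").itcount());

    replTest.stopSet();
})();
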
diff --git a/jstests/noPassthrough/read_majority_reads.js b/jstests/noPassthrough/read_majority_reads.js
index 578f17d748f..f76363a0b28 100644
--- a/jstests/noPassthrough/read_majority_reads.js
+++ b/jstests/noPassthrough/read_majority_reads.js
@@ -15,233 +15,232 @@
*/
(function() {
- 'use strict';
-
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
-
- var testServer = MongoRunner.runMongod();
- var db = testServer.getDB("test");
- if (!db.serverStatus().storageEngine.supportsCommittedReads) {
- print("Skipping read_majority.js since storageEngine doesn't support it.");
- MongoRunner.stopMongod(testServer);
- return;
- }
+'use strict';
+
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
+
+var testServer = MongoRunner.runMongod();
+var db = testServer.getDB("test");
+if (!db.serverStatus().storageEngine.supportsCommittedReads) {
+ print("Skipping read_majority.js since storageEngine doesn't support it.");
MongoRunner.stopMongod(testServer);
-
- function makeCursor(db, result) {
- return new DBCommandCursor(db, result);
- }
-
- // These test cases are functions that return a cursor of the documents in collections without
- // fetching them yet.
- var cursorTestCases = {
- find: function(coll) {
- return makeCursor(coll.getDB(),
- assert.commandWorked(coll.runCommand(
- 'find', {readConcern: {level: 'majority'}, batchSize: 0})));
- },
- aggregate: function(coll) {
- return makeCursor(
- coll.getDB(),
- assert.commandWorked(coll.runCommand(
- 'aggregate',
- {readConcern: {level: 'majority'}, cursor: {batchSize: 0}, pipeline: []})));
- },
- aggregateGeoNear: function(coll) {
- return makeCursor(coll.getDB(), assert.commandWorked(coll.runCommand('aggregate', {
- readConcern: {level: 'majority'},
- cursor: {batchSize: 0},
- pipeline: [{$geoNear: {near: [0, 0], distanceField: "d", spherical: true}}]
- })));
- },
- };
-
- // These test cases have a run method that will be passed a collection with a single object with
- // _id: 1 and a state field that equals either "before" or "after". The collection will also
- // contain both a 2dsphere and a geoHaystack index to enable testing commands that depend on
- // them. The return value from the run method is expected to be the value of expectedBefore or
- // expectedAfter depending on the state of the state field.
- var nonCursorTestCases = {
- count_before: {
- run: function(coll) {
- var res = coll.runCommand(
- 'count', {readConcern: {level: 'majority'}, query: {state: 'before'}});
- assert.commandWorked(res);
- return res.n;
- },
- expectedBefore: 1,
- expectedAfter: 0,
+ return;
+}
+MongoRunner.stopMongod(testServer);
+
+function makeCursor(db, result) {
+ return new DBCommandCursor(db, result);
+}
+
+// These test cases are functions that return a cursor of the documents in collections without
+// fetching them yet.
+var cursorTestCases = {
+ find: function(coll) {
+ return makeCursor(coll.getDB(),
+ assert.commandWorked(coll.runCommand(
+ 'find', {readConcern: {level: 'majority'}, batchSize: 0})));
+ },
+ aggregate: function(coll) {
+ return makeCursor(
+ coll.getDB(),
+ assert.commandWorked(coll.runCommand(
+ 'aggregate',
+ {readConcern: {level: 'majority'}, cursor: {batchSize: 0}, pipeline: []})));
+ },
+ aggregateGeoNear: function(coll) {
+ return makeCursor(coll.getDB(), assert.commandWorked(coll.runCommand('aggregate', {
+ readConcern: {level: 'majority'},
+ cursor: {batchSize: 0},
+ pipeline: [{$geoNear: {near: [0, 0], distanceField: "d", spherical: true}}]
+ })));
+ },
+};
+
+// These test cases have a run method that will be passed a collection with a single object with
+// _id: 1 and a state field that equals either "before" or "after". The collection will also
+// contain both a 2dsphere and a geoHaystack index to enable testing commands that depend on
+// them. The return value from the run method is expected to be the value of expectedBefore or
+// expectedAfter depending on the state of the state field.
+var nonCursorTestCases = {
+ count_before: {
+ run: function(coll) {
+ var res = coll.runCommand('count',
+ {readConcern: {level: 'majority'}, query: {state: 'before'}});
+ assert.commandWorked(res);
+ return res.n;
},
- count_after: {
- run: function(coll) {
- var res = coll.runCommand(
- 'count', {readConcern: {level: 'majority'}, query: {state: 'after'}});
- assert.commandWorked(res);
- return res.n;
- },
- expectedBefore: 0,
- expectedAfter: 1,
+ expectedBefore: 1,
+ expectedAfter: 0,
+ },
+ count_after: {
+ run: function(coll) {
+ var res = coll.runCommand('count',
+ {readConcern: {level: 'majority'}, query: {state: 'after'}});
+ assert.commandWorked(res);
+ return res.n;
},
- distinct: {
- run: function(coll) {
- var res =
- coll.runCommand('distinct', {readConcern: {level: 'majority'}, key: 'state'});
- assert.commandWorked(res);
- assert.eq(res.values.length, 1, tojson(res));
- return res.values[0];
- },
- expectedBefore: 'before',
- expectedAfter: 'after',
+ expectedBefore: 0,
+ expectedAfter: 1,
+ },
+ distinct: {
+ run: function(coll) {
+ var res = coll.runCommand('distinct', {readConcern: {level: 'majority'}, key: 'state'});
+ assert.commandWorked(res);
+ assert.eq(res.values.length, 1, tojson(res));
+ return res.values[0];
},
- geoSearch: {
- run: function(coll) {
- var res = coll.runCommand('geoSearch', {
- readConcern: {level: 'majority'},
- near: [0, 0],
- search: {_id: 1}, // Needed due to SERVER-23158.
- maxDistance: 1,
- });
- assert.commandWorked(res);
- assert.eq(res.results.length, 1, tojson(res));
- return res.results[0].state;
- },
- expectedBefore: 'before',
- expectedAfter: 'after',
+ expectedBefore: 'before',
+ expectedAfter: 'after',
+ },
+ geoSearch: {
+ run: function(coll) {
+ var res = coll.runCommand('geoSearch', {
+ readConcern: {level: 'majority'},
+ near: [0, 0],
+ search: {_id: 1}, // Needed due to SERVER-23158.
+ maxDistance: 1,
+ });
+ assert.commandWorked(res);
+ assert.eq(res.results.length, 1, tojson(res));
+ return res.results[0].state;
},
- };
-
- function runTests(coll, mongodConnection) {
- function makeSnapshot() {
- return assert.commandWorked(mongodConnection.adminCommand("makeSnapshot")).name;
- }
- function setCommittedSnapshot(snapshot) {
- assert.commandWorked(mongodConnection.adminCommand({"setCommittedSnapshot": snapshot}));
- }
-
- assert.commandWorked(coll.createIndex({point: '2dsphere'}));
- for (var testName in cursorTestCases) {
- jsTestLog('Running ' + testName + ' against ' + coll.toString());
- var getCursor = cursorTestCases[testName];
-
- // Setup initial state.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]}));
- setCommittedSnapshot(makeSnapshot());
-
- // Check initial conditions.
- assert.eq(getCursor(coll).next().state, 'before');
-
- // Change state without making it committed.
- assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]}));
-
- // Cursor still sees old state.
- assert.eq(getCursor(coll).next().state, 'before');
-
- // Create a cursor before the update is visible.
- var oldCursor = getCursor(coll);
-
- // Making a snapshot doesn't make the update visible yet.
- var snapshot = makeSnapshot();
- assert.eq(getCursor(coll).next().state, 'before');
-
- // Setting it as committed does for both new and old cursors.
- setCommittedSnapshot(snapshot);
- assert.eq(getCursor(coll).next().state, 'after');
- assert.eq(oldCursor.next().state, 'after');
- }
-
- assert.commandWorked(coll.ensureIndex({point: 'geoHaystack', _id: 1}, {bucketSize: 1}));
- for (var testName in nonCursorTestCases) {
- jsTestLog('Running ' + testName + ' against ' + coll.toString());
- var getResult = nonCursorTestCases[testName].run;
- var expectedBefore = nonCursorTestCases[testName].expectedBefore;
- var expectedAfter = nonCursorTestCases[testName].expectedAfter;
-
- // Setup initial state.
- assert.writeOK(coll.remove({}));
- assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]}));
- setCommittedSnapshot(makeSnapshot());
-
- // Check initial conditions.
- assert.eq(getResult(coll), expectedBefore);
-
- // Change state without making it committed.
- assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]}));
-
- // Cursor still sees old state.
- assert.eq(getResult(coll), expectedBefore);
-
- // Making a snapshot doesn't make the update visible yet.
- var snapshot = makeSnapshot();
- assert.eq(getResult(coll), expectedBefore);
-
- // Setting it as committed does.
- setCommittedSnapshot(snapshot);
- assert.eq(getResult(coll), expectedAfter);
- }
+ expectedBefore: 'before',
+ expectedAfter: 'after',
+ },
+};
+
+function runTests(coll, mongodConnection) {
+ function makeSnapshot() {
+ return assert.commandWorked(mongodConnection.adminCommand("makeSnapshot")).name;
+ }
+ function setCommittedSnapshot(snapshot) {
+ assert.commandWorked(mongodConnection.adminCommand({"setCommittedSnapshot": snapshot}));
+ }
+
+ assert.commandWorked(coll.createIndex({point: '2dsphere'}));
+ for (var testName in cursorTestCases) {
+ jsTestLog('Running ' + testName + ' against ' + coll.toString());
+ var getCursor = cursorTestCases[testName];
+
+ // Setup initial state.
+ assert.writeOK(coll.remove({}));
+ assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]}));
+ setCommittedSnapshot(makeSnapshot());
+
+ // Check initial conditions.
+ assert.eq(getCursor(coll).next().state, 'before');
+
+ // Change state without making it committed.
+ assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]}));
+
+ // Cursor still sees old state.
+ assert.eq(getCursor(coll).next().state, 'before');
+
+ // Create a cursor before the update is visible.
+ var oldCursor = getCursor(coll);
+
+ // Making a snapshot doesn't make the update visible yet.
+ var snapshot = makeSnapshot();
+ assert.eq(getCursor(coll).next().state, 'before');
+
+ // Setting it as committed does for both new and old cursors.
+ setCommittedSnapshot(snapshot);
+ assert.eq(getCursor(coll).next().state, 'after');
+ assert.eq(oldCursor.next().state, 'after');
+ }
+
+ assert.commandWorked(coll.ensureIndex({point: 'geoHaystack', _id: 1}, {bucketSize: 1}));
+ for (var testName in nonCursorTestCases) {
+ jsTestLog('Running ' + testName + ' against ' + coll.toString());
+ var getResult = nonCursorTestCases[testName].run;
+ var expectedBefore = nonCursorTestCases[testName].expectedBefore;
+ var expectedAfter = nonCursorTestCases[testName].expectedAfter;
+
+ // Setup initial state.
+ assert.writeOK(coll.remove({}));
+ assert.writeOK(coll.save({_id: 1, state: 'before', point: [0, 0]}));
+ setCommittedSnapshot(makeSnapshot());
+
+ // Check initial conditions.
+ assert.eq(getResult(coll), expectedBefore);
+
+ // Change state without making it committed.
+ assert.writeOK(coll.save({_id: 1, state: 'after', point: [0, 0]}));
+
+ // Cursor still sees old state.
+ assert.eq(getResult(coll), expectedBefore);
+
+ // Making a snapshot doesn't make the update visible yet.
+ var snapshot = makeSnapshot();
+ assert.eq(getResult(coll), expectedBefore);
+
+ // Setting it as committed does.
+ setCommittedSnapshot(snapshot);
+ assert.eq(getResult(coll), expectedAfter);
+ }
+}
+
+var replTest = new ReplSetTest({
+ nodes: 1,
+ oplogSize: 2,
+ nodeOptions: {
+ setParameter: 'testingSnapshotBehaviorInIsolation=true',
+ enableMajorityReadConcern: '',
+ shardsvr: ''
}
+});
+replTest.startSet();
+// Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation' set.
+replTest.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+
+var mongod = replTest.getPrimary();
+
+(function testSingleNode() {
+ var db = mongod.getDB("singleNode");
+ runTests(db.collection, mongod);
+})();
+
+var shardingTest = new ShardingTest({
+ shards: 0,
+ mongos: 1,
+});
+assert(shardingTest.adminCommand({addShard: replTest.getURL()}));
+
+// Remove tests of commands that aren't supported at all through mongos, even on unsharded
+// collections.
+['geoSearch'].forEach(function(cmd) {
+ // Make sure it really isn't supported.
+ assert.eq(shardingTest.getDB('test').coll.runCommand(cmd).code, ErrorCodes.CommandNotFound);
+ delete cursorTestCases[cmd];
+ delete nonCursorTestCases[cmd];
+});
+
+(function testUnshardedDBThroughMongos() {
+ var db = shardingTest.getDB("throughMongos");
+ runTests(db.unshardedDB, mongod);
+})();
+
+shardingTest.adminCommand({enableSharding: 'throughMongos'});
+
+(function testUnshardedCollectionThroughMongos() {
+ var db = shardingTest.getDB("throughMongos");
+ runTests(db.unshardedCollection, mongod);
+})();
+
+(function testShardedCollectionThroughMongos() {
+ var db = shardingTest.getDB("throughMongos");
+ var collection = db.shardedCollection;
+ shardingTest.adminCommand({shardCollection: collection.getFullName(), key: {_id: 1}});
+ runTests(collection, mongod);
+})();
- var replTest = new ReplSetTest({
- nodes: 1,
- oplogSize: 2,
- nodeOptions: {
- setParameter: 'testingSnapshotBehaviorInIsolation=true',
- enableMajorityReadConcern: '',
- shardsvr: ''
- }
- });
- replTest.startSet();
- // Cannot wait for a stable recovery timestamp with 'testingSnapshotBehaviorInIsolation' set.
- replTest.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
-
- var mongod = replTest.getPrimary();
-
- (function testSingleNode() {
- var db = mongod.getDB("singleNode");
- runTests(db.collection, mongod);
- })();
-
- var shardingTest = new ShardingTest({
- shards: 0,
- mongos: 1,
- });
- assert(shardingTest.adminCommand({addShard: replTest.getURL()}));
-
- // Remove tests of commands that aren't supported at all through mongos, even on unsharded
- // collections.
- ['geoSearch'].forEach(function(cmd) {
- // Make sure it really isn't supported.
- assert.eq(shardingTest.getDB('test').coll.runCommand(cmd).code, ErrorCodes.CommandNotFound);
- delete cursorTestCases[cmd];
- delete nonCursorTestCases[cmd];
- });
-
- (function testUnshardedDBThroughMongos() {
- var db = shardingTest.getDB("throughMongos");
- runTests(db.unshardedDB, mongod);
- })();
-
- shardingTest.adminCommand({enableSharding: 'throughMongos'});
-
- (function testUnshardedCollectionThroughMongos() {
- var db = shardingTest.getDB("throughMongos");
- runTests(db.unshardedCollection, mongod);
- })();
-
- (function testShardedCollectionThroughMongos() {
- var db = shardingTest.getDB("throughMongos");
- var collection = db.shardedCollection;
- shardingTest.adminCommand({shardCollection: collection.getFullName(), key: {_id: 1}});
- runTests(collection, mongod);
- })();
-
- shardingTest.stop();
- replTest.stopSet();
+shardingTest.stop();
+replTest.stopSet();
})();
diff --git a/jstests/noPassthrough/rebuild_multiple_indexes_at_startup.js b/jstests/noPassthrough/rebuild_multiple_indexes_at_startup.js
index 108860cfd6a..7416b41bde8 100644
--- a/jstests/noPassthrough/rebuild_multiple_indexes_at_startup.js
+++ b/jstests/noPassthrough/rebuild_multiple_indexes_at_startup.js
@@ -5,50 +5,49 @@
* @tags: [requires_persistence, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({
- name: "rebuildMultipleIndexesAtStartup",
- nodes: 2,
- nodeOptions:
- {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
- });
- const nodes = rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({
+ name: "rebuildMultipleIndexesAtStartup",
+ nodes: 2,
+ nodeOptions: {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
+});
+const nodes = rst.startSet();
+rst.initiate();
- if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
- // Only snapshotting storage engines can pause advancing the stable timestamp allowing us
- // to get into a state where indexes exist, but the underlying tables were dropped.
- rst.stopSet();
- return;
- }
+if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
+    // Only snapshotting storage engines can pause advancing the stable timestamp, allowing us
+ // to get into a state where indexes exist, but the underlying tables were dropped.
+ rst.stopSet();
+ return;
+}
- let coll = rst.getPrimary().getDB("indexRebuild")["coll"];
- assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}], {}, {writeConcern: {w: "majority"}}));
- assert.eq(3, coll.getIndexes().length);
- rst.awaitReplication(undefined, ReplSetTest.OpTimeType.LAST_DURABLE);
+let coll = rst.getPrimary().getDB("indexRebuild")["coll"];
+assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}], {}, {writeConcern: {w: "majority"}}));
+assert.eq(3, coll.getIndexes().length);
+rst.awaitReplication(undefined, ReplSetTest.OpTimeType.LAST_DURABLE);
- // Lock the index entries into a stable checkpoint by shutting down.
- rst.stopSet(undefined, true);
- rst.startSet(undefined, true);
+// Lock the index entries into a stable checkpoint by shutting down.
+rst.stopSet(undefined, true);
+rst.startSet(undefined, true);
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- nodes.forEach(node => assert.commandWorked(node.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+nodes.forEach(node => assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
- // Dropping the index would normally modify the collection metadata and drop the
- // table. Because we're not advancing the stable timestamp and we're going to crash the
- // server, the catalog change won't take effect, but the WT table being dropped will.
- coll = rst.getPrimary().getDB("indexRebuild")["coll"];
- assert.commandWorked(coll.dropIndexes());
- rst.awaitReplication();
- rst.stopSet(9, true, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+// Dropping the index would normally modify the collection metadata and drop the
+// table. Because we're not advancing the stable timestamp and we're going to crash the
+// server, the catalog change won't take effect, but the WT table being dropped will.
+coll = rst.getPrimary().getDB("indexRebuild")["coll"];
+assert.commandWorked(coll.dropIndexes());
+rst.awaitReplication();
+rst.stopSet(9, true, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- // Restarting the replica set should rebuild both indexes on both nodes. Just to be dropped
- // again by replication recovery. Starting up successfully is a passing test run.
- rst.startSet(undefined, true);
- coll = rst.getPrimary().getDB("indexRebuild")["coll"];
- assert.eq(1, coll.getIndexes().length);
- rst.stopSet();
+// Restarting the replica set should rebuild both indexes on both nodes, only for them to be
+// dropped again by replication recovery. Starting up successfully is a passing test run.
+rst.startSet(undefined, true);
+coll = rst.getPrimary().getDB("indexRebuild")["coll"];
+assert.eq(1, coll.getIndexes().length);
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/recovery_wt_cache_full.js b/jstests/noPassthrough/recovery_wt_cache_full.js
index dff22ad959a..7d7dc171296 100644
--- a/jstests/noPassthrough/recovery_wt_cache_full.js
+++ b/jstests/noPassthrough/recovery_wt_cache_full.js
@@ -4,94 +4,93 @@
* requires_majority_read_concern]
*/
(function() {
- 'use strict';
- load('jstests/libs/check_log.js');
+'use strict';
+load('jstests/libs/check_log.js');
- const rst = new ReplSetTest({
- nodes: [
- {
- slowms: 30000, // Don't log slow operations on primary.
- },
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- // Do not specify storage engine in this node's options because this will
- // prevent us from overriding it on restart.
+const rst = new ReplSetTest({
+ nodes: [
+ {
+ slowms: 30000, // Don't log slow operations on primary.
+ },
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
- const nodes = rst.startSet({
- // Start with a larger storage engine cache size to allow the secondary to write
- // the oplog entries to disk. This setting will be adjusted downwards upon restart to
- // test recovery behavior as the cache fills up.
- // This overrides the --storageEngineCacheSideGB setting passed to resmoke.py but does not
- // affect the default cache size on restart.
- wiredTigerCacheSizeGB: 10,
- });
- rst.initiate();
+ // Do not specify storage engine in this node's options because this will
+ // prevent us from overriding it on restart.
+ },
+ ]
+});
+const nodes = rst.startSet({
+ // Start with a larger storage engine cache size to allow the secondary to write
+ // the oplog entries to disk. This setting will be adjusted downwards upon restart to
+ // test recovery behavior as the cache fills up.
+    // This overrides the --storageEngineCacheSizeGB setting passed to resmoke.py but does not
+ // affect the default cache size on restart.
+ wiredTigerCacheSizeGB: 10,
+});
+rst.initiate();
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('t');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('t');
- const numDocs = 2;
- const minDocSizeMB = 10;
+const numDocs = 2;
+const minDocSizeMB = 10;
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
- coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
- {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
+for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
+ {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+}
+assert.eq(numDocs, coll.find().itcount());
- let secondary = rst.getSecondary();
- const batchOpsLimit =
- assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
- .replBatchLimitOperations;
- jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' +
- batchOpsLimit + ' operations per batch.');
+let secondary = rst.getSecondary();
+const batchOpsLimit =
+ assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
+ .replBatchLimitOperations;
+jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' + batchOpsLimit +
+ ' operations per batch.');
- // Disable snapshotting on secondary so that further operations do not enter the majority
- // snapshot.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+// Disable snapshotting on secondary so that further operations do not enter the majority
+// snapshot.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
- const numUpdates = 500;
- jsTestLog('Writing ' + numUpdates + ' updates to ' + numDocs +
- ' documents on secondary after disabling snapshots.');
- for (let i = 0; i < numDocs; ++i) {
- for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
- }
+const numUpdates = 500;
+jsTestLog('Writing ' + numUpdates + ' updates to ' + numDocs +
+ ' documents on secondary after disabling snapshots.');
+for (let i = 0; i < numDocs; ++i) {
+ for (let j = 0; j < numUpdates; ++j) {
+ assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
}
+}
- jsTestLog('Waiting for updates on secondary ' + secondary.host +
- ' to be written to the oplog.');
- rst.awaitReplication();
+jsTestLog('Waiting for updates on secondary ' + secondary.host + ' to be written to the oplog.');
+rst.awaitReplication();
- secondary = rst.restart(1, {
- setParameter: {
- logComponentVerbosity: tojsononeline({storage: {recovery: 2}}),
- },
- // Constrain the storage engine cache size to make it easier to fill it up with unflushed
- // modification.
- wiredTigerCacheSizeGB: 1,
- });
+secondary = rst.restart(1, {
+ setParameter: {
+ logComponentVerbosity: tojsononeline({storage: {recovery: 2}}),
+ },
+ // Constrain the storage engine cache size to make it easier to fill it up with unflushed
+    // modifications.
+ wiredTigerCacheSizeGB: 1,
+});
- // Verify storage engine cache size in effect during recovery.
- const actualCacheSizeGB = assert.commandWorked(secondary.adminCommand({getCmdLineOpts: 1}))
- .parsed.storage.wiredTiger.engineConfig.cacheSizeGB;
- jsTestLog('Secondary was restarted with a storage cache size of ' + actualCacheSizeGB + ' GB.');
- assert.eq(1, actualCacheSizeGB);
+// Verify storage engine cache size in effect during recovery.
+const actualCacheSizeGB = assert.commandWorked(secondary.adminCommand({getCmdLineOpts: 1}))
+ .parsed.storage.wiredTiger.engineConfig.cacheSizeGB;
+jsTestLog('Secondary was restarted with a storage cache size of ' + actualCacheSizeGB + ' GB.');
+assert.eq(1, actualCacheSizeGB);
- checkLog.contains(secondary, 'Starting recovery oplog application');
- jsTestLog('Applying updates on secondary ' + secondary.host + ' during recovery.');
+checkLog.contains(secondary, 'Starting recovery oplog application');
+jsTestLog('Applying updates on secondary ' + secondary.host + ' during recovery.');
- // This ensures that the node is able to complete recovery and transition to SECONDARY.
- rst.awaitReplication();
+// This ensures that the node is able to complete recovery and transition to SECONDARY.
+rst.awaitReplication();
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/refresh_logical_session_cache_now.js b/jstests/noPassthrough/refresh_logical_session_cache_now.js
index f1c87d482ea..ac11c138c6f 100644
--- a/jstests/noPassthrough/refresh_logical_session_cache_now.js
+++ b/jstests/noPassthrough/refresh_logical_session_cache_now.js
@@ -1,49 +1,48 @@
(function() {
- "use script";
+"use script";
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- var res;
- var refresh = {refreshLogicalSessionCacheNow: 1};
- var startSession = {startSession: 1};
+var res;
+var refresh = {refreshLogicalSessionCacheNow: 1};
+var startSession = {startSession: 1};
- // Start up a standalone server.
- var conn = MongoRunner.runMongod();
- var admin = conn.getDB("admin");
- var config = conn.getDB("config");
+// Start up a standalone server.
+var conn = MongoRunner.runMongod();
+var admin = conn.getDB("admin");
+var config = conn.getDB("config");
- // Trigger an initial refresh, as a sanity check.
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
+// Trigger an initial refresh, as a sanity check.
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
- // Start a session. Should not be in the collection yet.
- res = admin.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
+// Start a session. Should not be in the collection yet.
+res = admin.runCommand(startSession);
+assert.commandWorked(res, "unable to start session");
- assert.eq(config.system.sessions.count(), 0, "should not have session records yet");
+assert.eq(config.system.sessions.count(), 0, "should not have session records yet");
- // Trigger a refresh. Session should now be in the collection.
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
+// Trigger a refresh. Session should now be in the collection.
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
- assert.eq(config.system.sessions.count(), 1, "should have written session records");
+assert.eq(config.system.sessions.count(), 1, "should have written session records");
- // Start some new sessions. Should not be in the collection yet.
- var numSessions = 100;
- for (var i = 0; i < numSessions; i++) {
- res = admin.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- }
+// Start some new sessions. Should not be in the collection yet.
+var numSessions = 100;
+for (var i = 0; i < numSessions; i++) {
+ res = admin.runCommand(startSession);
+ assert.commandWorked(res, "unable to start session");
+}
- assert.eq(config.system.sessions.count(), 1, "should not have more session records yet");
+assert.eq(config.system.sessions.count(), 1, "should not have more session records yet");
- // Trigger another refresh. All sessions should now be in the collection.
- res = admin.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
+// Trigger another refresh. All sessions should now be in the collection.
+res = admin.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
- assert.eq(
- config.system.sessions.count(), numSessions + 1, "should have written session records");
- MongoRunner.stopMongod(conn);
+assert.eq(config.system.sessions.count(), numSessions + 1, "should have written session records");
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/refresh_sessions_command.js b/jstests/noPassthrough/refresh_sessions_command.js
index 4386b61429e..a0a65fb4695 100644
--- a/jstests/noPassthrough/refresh_sessions_command.js
+++ b/jstests/noPassthrough/refresh_sessions_command.js
@@ -1,96 +1,93 @@
(function() {
- "use strict";
-
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
-
- var conn;
- var admin;
- var result;
- var startSession = {startSession: 1};
-
- // Run initial tests without auth.
- conn = MongoRunner.runMongod();
- admin = conn.getDB("admin");
-
- result = admin.runCommand(startSession);
- assert.commandWorked(result, "failed to startSession");
- var lsid = result.id;
-
- // Test that we can run refreshSessions unauthenticated if --auth is off.
- result = admin.runCommand({refreshSessions: [lsid]});
- assert.commandWorked(result, "could not run refreshSessions unauthenticated without --auth");
-
- // Test that we can run refreshSessions authenticated if --auth is off.
- admin.createUser(
- {user: 'admin', pwd: 'admin', roles: ['readAnyDatabase', 'userAdminAnyDatabase']});
- admin.auth("admin", "admin");
- result = admin.runCommand(startSession);
- var lsid2 = result.id;
- result = admin.runCommand({refreshSessions: [lsid2]});
- assert.commandWorked(result, "could not run refreshSessions logged in with --auth off");
-
- // Turn on auth for further testing.
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({auth: "", nojournal: "", setParameter: {maxSessions: 3}});
- admin = conn.getDB("admin");
-
- admin.createUser(
- {user: 'admin', pwd: 'admin', roles: ['readAnyDatabase', 'userAdminAnyDatabase']});
- admin.auth("admin", "admin");
-
- result = admin.runCommand({
- createRole: 'readSessionsCollection',
- privileges: [{resource: {db: 'config', collection: 'system.sessions'}, actions: ['find']}],
- roles: []
- });
- assert.commandWorked(result, "couldn't make readSessionsCollection role");
-
- admin.createUser(
- {user: 'readSessionsCollection', pwd: 'pwd', roles: ['readSessionsCollection']});
- admin.logout();
-
- // Test that we cannot run refreshSessions unauthenticated if --auth is on.
- result = admin.runCommand({refreshSessions: [lsid]});
- assert.commandFailed(result, "able to run refreshSessions without authenticating");
-
- // Test that we can run refreshSessions on our own sessions authenticated if --auth is on.
- admin.auth("admin", "admin");
- result = admin.runCommand(startSession);
- var lsid3 = result.id;
- result = admin.runCommand({refreshSessions: [lsid3]});
- assert.commandWorked(result, "unable to run refreshSessions while logged in");
-
- // Test that we can refresh "others'" sessions (new ones) when authenticated with --auth.
- result = admin.runCommand({refreshSessions: [lsid]});
- assert.commandWorked(result, "unable to refresh novel lsids");
-
- // Test that sending a mix of known and new sessions is fine
- result = admin.runCommand({refreshSessions: [lsid, lsid2, lsid3]});
- assert.commandWorked(result, "unable to refresh mix of known and unknown lsids");
-
- // Test that sending a set of sessions with duplicates is fine
- result = admin.runCommand({refreshSessions: [lsid, lsid, lsid, lsid]});
- assert.commandWorked(result, "unable to refresh with duplicate lsids in the set");
-
- // Test that we can run refreshSessions with an empty set of sessions.
- result = admin.runCommand({refreshSessions: []});
- assert.commandWorked(result, "unable to refresh empty set of lsids");
-
- // Test that we cannot run refreshSessions when the cache is full.
- var lsid4 = {"id": UUID()};
- result = admin.runCommand({refreshSessions: [lsid4]});
- assert.commandFailed(result, "able to run refreshSessions when the cache is full");
-
- // Test that once we force a refresh, all of these sessions are in the sessions collection.
- admin.logout();
- admin.auth("readSessionsCollection", "pwd");
- result = admin.runCommand({refreshLogicalSessionCacheNow: 1});
- assert.commandWorked(result, "could not force refresh");
-
- var config = conn.getDB("config");
- assert.eq(config.system.sessions.count(), 3, "should have refreshed all session records");
-
- MongoRunner.stopMongod(conn);
+"use strict";
+
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
+
+var conn;
+var admin;
+var result;
+var startSession = {startSession: 1};
+
+// Run initial tests without auth.
+conn = MongoRunner.runMongod();
+admin = conn.getDB("admin");
+
+result = admin.runCommand(startSession);
+assert.commandWorked(result, "failed to startSession");
+var lsid = result.id;
+
+// Test that we can run refreshSessions unauthenticated if --auth is off.
+result = admin.runCommand({refreshSessions: [lsid]});
+assert.commandWorked(result, "could not run refreshSessions unauthenticated without --auth");
+
+// Test that we can run refreshSessions authenticated if --auth is off.
+admin.createUser({user: 'admin', pwd: 'admin', roles: ['readAnyDatabase', 'userAdminAnyDatabase']});
+admin.auth("admin", "admin");
+result = admin.runCommand(startSession);
+var lsid2 = result.id;
+result = admin.runCommand({refreshSessions: [lsid2]});
+assert.commandWorked(result, "could not run refreshSessions logged in with --auth off");
+
+// Turn on auth for further testing.
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({auth: "", nojournal: "", setParameter: {maxSessions: 3}});
+admin = conn.getDB("admin");
+
+admin.createUser({user: 'admin', pwd: 'admin', roles: ['readAnyDatabase', 'userAdminAnyDatabase']});
+admin.auth("admin", "admin");
+
+result = admin.runCommand({
+ createRole: 'readSessionsCollection',
+ privileges: [{resource: {db: 'config', collection: 'system.sessions'}, actions: ['find']}],
+ roles: []
+});
+assert.commandWorked(result, "couldn't make readSessionsCollection role");
+
+admin.createUser({user: 'readSessionsCollection', pwd: 'pwd', roles: ['readSessionsCollection']});
+admin.logout();
+
+// Test that we cannot run refreshSessions unauthenticated if --auth is on.
+result = admin.runCommand({refreshSessions: [lsid]});
+assert.commandFailed(result, "able to run refreshSessions without authenticating");
+
+// Test that we can run refreshSessions on our own sessions authenticated if --auth is on.
+admin.auth("admin", "admin");
+result = admin.runCommand(startSession);
+var lsid3 = result.id;
+result = admin.runCommand({refreshSessions: [lsid3]});
+assert.commandWorked(result, "unable to run refreshSessions while logged in");
+
+// Test that we can refresh "others'" sessions (new ones) when authenticated with --auth.
+result = admin.runCommand({refreshSessions: [lsid]});
+assert.commandWorked(result, "unable to refresh novel lsids");
+
+// Test that sending a mix of known and new sessions is fine
+result = admin.runCommand({refreshSessions: [lsid, lsid2, lsid3]});
+assert.commandWorked(result, "unable to refresh mix of known and unknown lsids");
+
+// Test that sending a set of sessions with duplicates is fine
+result = admin.runCommand({refreshSessions: [lsid, lsid, lsid, lsid]});
+assert.commandWorked(result, "unable to refresh with duplicate lsids in the set");
+
+// Test that we can run refreshSessions with an empty set of sessions.
+result = admin.runCommand({refreshSessions: []});
+assert.commandWorked(result, "unable to refresh empty set of lsids");
+
+// Test that we cannot run refreshSessions when the cache is full.
+var lsid4 = {"id": UUID()};
+result = admin.runCommand({refreshSessions: [lsid4]});
+assert.commandFailed(result, "able to run refreshSessions when the cache is full");
+
+// Test that once we force a refresh, all of these sessions are in the sessions collection.
+admin.logout();
+admin.auth("readSessionsCollection", "pwd");
+result = admin.runCommand({refreshLogicalSessionCacheNow: 1});
+assert.commandWorked(result, "could not force refresh");
+
+var config = conn.getDB("config");
+assert.eq(config.system.sessions.count(), 3, "should have refreshed all session records");
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js b/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js
index c0efd29ba42..b0fe7e7c4ac 100644
--- a/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js
+++ b/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js
@@ -9,44 +9,47 @@
*/
(function() {
- load("jstests/libs/get_index_helpers.js"); // For GetIndexHelpers.
-
- const baseName = 'reindex_crash_rebuilds_id_index';
- const collName = baseName;
- const dbpath = MongoRunner.dataPath + baseName + '/';
- resetDbpath(dbpath);
-
- const mongodOptions = {dbpath: dbpath, noCleanData: true};
- let conn = MongoRunner.runMongod(mongodOptions);
-
- let testDB = conn.getDB('test');
- let testColl = testDB.getCollection(collName);
-
- // Insert a single document and create the collection.
- testColl.insert({a: 1});
- let spec = GetIndexHelpers.findByKeyPattern(testColl.getIndexes(), {_id: 1});
- assert.neq(null, spec, "_id index not found");
- assert.eq("_id_", spec.name, tojson(spec));
-
- // Enable a failpoint that causes reIndex to crash after dropping the indexes but before
- // rebuilding them.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'reIndexCrashAfterDrop', mode: 'alwaysOn'}));
- assert.throws(() => testColl.runCommand({reIndex: collName}));
-
- // The server should have crashed from the failpoint.
- MongoRunner.stopMongod(conn, null, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
-
- // The server should start up successfully after rebuilding the _id index.
- conn = MongoRunner.runMongod(mongodOptions);
- testDB = conn.getDB('test');
- testColl = testDB.getCollection(collName);
- assert(testColl.exists());
-
- // The _id index should exist.
- spec = GetIndexHelpers.findByKeyPattern(testColl.getIndexes(), {_id: 1});
- assert.neq(null, spec, "_id index not found");
- assert.eq("_id_", spec.name, tojson(spec));
-
- MongoRunner.stopMongod(conn);
+load("jstests/libs/get_index_helpers.js"); // For GetIndexHelpers.
+
+const baseName = 'reindex_crash_rebuilds_id_index';
+const collName = baseName;
+const dbpath = MongoRunner.dataPath + baseName + '/';
+resetDbpath(dbpath);
+
+const mongodOptions = {
+ dbpath: dbpath,
+ noCleanData: true
+};
+let conn = MongoRunner.runMongod(mongodOptions);
+
+let testDB = conn.getDB('test');
+let testColl = testDB.getCollection(collName);
+
+// Insert a single document and create the collection.
+testColl.insert({a: 1});
+let spec = GetIndexHelpers.findByKeyPattern(testColl.getIndexes(), {_id: 1});
+assert.neq(null, spec, "_id index not found");
+assert.eq("_id_", spec.name, tojson(spec));
+
+// Enable a failpoint that causes reIndex to crash after dropping the indexes but before
+// rebuilding them.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'reIndexCrashAfterDrop', mode: 'alwaysOn'}));
+assert.throws(() => testColl.runCommand({reIndex: collName}));
+
+// The server should have crashed from the failpoint.
+MongoRunner.stopMongod(conn, null, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
+
+// The server should start up successfully after rebuilding the _id index.
+conn = MongoRunner.runMongod(mongodOptions);
+testDB = conn.getDB('test');
+testColl = testDB.getCollection(collName);
+assert(testColl.exists());
+
+// The _id index should exist.
+spec = GetIndexHelpers.findByKeyPattern(testColl.getIndexes(), {_id: 1});
+assert.neq(null, spec, "_id index not found");
+assert.eq("_id_", spec.name, tojson(spec));
+
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/repair_flag_transport_layer.js b/jstests/noPassthrough/repair_flag_transport_layer.js
index 987b07cd8d5..4898da084b0 100644
--- a/jstests/noPassthrough/repair_flag_transport_layer.js
+++ b/jstests/noPassthrough/repair_flag_transport_layer.js
@@ -3,20 +3,18 @@
*/
(function() {
- "use strict";
- let dbpath = MongoRunner.dataPath + "repair_flag_transport_layer";
- resetDbpath(dbpath);
+"use strict";
+let dbpath = MongoRunner.dataPath + "repair_flag_transport_layer";
+resetDbpath(dbpath);
- function runTest(conn) {
- let returnCode =
- runNonMongoProgram("mongod", "--port", conn.port, "--repair", "--dbpath", dbpath);
- assert.eq(
- returnCode, 0, "expected mongod --repair to execute successfully regardless of port");
- }
+function runTest(conn) {
+ let returnCode =
+ runNonMongoProgram("mongod", "--port", conn.port, "--repair", "--dbpath", dbpath);
+ assert.eq(returnCode, 0, "expected mongod --repair to execute successfully regardless of port");
+}
- let conn = MongoRunner.runMongod();
-
- runTest(conn);
- MongoRunner.stopMongod(conn);
+let conn = MongoRunner.runMongod();
+runTest(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/repl_set_resize_oplog.js b/jstests/noPassthrough/repl_set_resize_oplog.js
index 23682467f9d..0720746e732 100644
--- a/jstests/noPassthrough/repl_set_resize_oplog.js
+++ b/jstests/noPassthrough/repl_set_resize_oplog.js
@@ -4,44 +4,42 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- "use strict";
+"use strict";
- let replSet = new ReplSetTest({nodes: 2, oplogSize: 50});
- replSet.startSet();
- replSet.initiate();
+let replSet = new ReplSetTest({nodes: 2, oplogSize: 50});
+replSet.startSet();
+replSet.initiate();
- let primary = replSet.getPrimary();
+let primary = replSet.getPrimary();
- const MB = 1024 * 1024;
- const GB = 1024 * MB;
- const PB = 1024 * GB;
- const EB = 1024 * PB;
+const MB = 1024 * 1024;
+const GB = 1024 * MB;
+const PB = 1024 * GB;
+const EB = 1024 * PB;
- assert.eq(primary.getDB('local').oplog.rs.stats().maxSize, 50 * MB);
+assert.eq(primary.getDB('local').oplog.rs.stats().maxSize, 50 * MB);
- // Too small: 990MB
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 900}),
- ErrorCodes.InvalidOptions,
- "Expected replSetResizeOplog to fail because the size was too small");
+// Too small: 900MB (the minimum is 990MB)
+assert.commandFailedWithCode(primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 900}),
+ ErrorCodes.InvalidOptions,
+ "Expected replSetResizeOplog to fail because the size was too small");
- // Way too small: -1GB
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: -1 * GB / MB}),
- ErrorCodes.InvalidOptions,
- "Expected replSetResizeOplog to fail because the size was too small");
+// Way too small: -1GB
+assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: -1 * GB / MB}),
+ ErrorCodes.InvalidOptions,
+ "Expected replSetResizeOplog to fail because the size was too small");
- // Too big: 8EB
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 8 * EB / MB}),
- ErrorCodes.InvalidOptions,
- "Expected replSetResizeOplog to fail because the size was too big");
+// Too big: 8EB
+assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 8 * EB / MB}),
+ ErrorCodes.InvalidOptions,
+ "Expected replSetResizeOplog to fail because the size was too big");
- // The maximum: 1PB
- assert.commandWorked(
- primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 1 * PB / MB}));
+// The maximum: 1PB
+assert.commandWorked(primary.getDB('admin').runCommand({replSetResizeOplog: 1, size: 1 * PB / MB}));
- assert.eq(primary.getDB('local').oplog.rs.stats().maxSize, 1 * PB);
+assert.eq(primary.getDB('local').oplog.rs.stats().maxSize, 1 * PB);
- replSet.stopSet();
+replSet.stopSet();
})();
diff --git a/jstests/noPassthrough/repl_write_threads_start_param.js b/jstests/noPassthrough/repl_write_threads_start_param.js
index e18d8de1259..f80f0f81655 100644
--- a/jstests/noPassthrough/repl_write_threads_start_param.js
+++ b/jstests/noPassthrough/repl_write_threads_start_param.js
@@ -5,37 +5,36 @@
// 4) cannot be altered at run time
(function() {
- "use strict";
+"use strict";
- // too low a count
- clearRawMongoProgramOutput();
- var mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=0'});
- assert.soon(function() {
- return rawMongoProgramOutput().match(
- "Invalid value for parameter replWriterThreadCount: 0 is not greater than or equal to 1");
- }, "mongod started with too low a value for replWriterThreadCount");
+// too low a count
+clearRawMongoProgramOutput();
+var mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=0'});
+assert.soon(function() {
+ return rawMongoProgramOutput().match(
+ "Invalid value for parameter replWriterThreadCount: 0 is not greater than or equal to 1");
+}, "mongod started with too low a value for replWriterThreadCount");
- // too high a count
- clearRawMongoProgramOutput();
- mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=257'});
- assert.soon(function() {
- return rawMongoProgramOutput().match(
- "Invalid value for parameter replWriterThreadCount: 257 is not less than or equal to 256");
- }, "mongod started with too high a value for replWriterThreadCount");
+// too high a count
+clearRawMongoProgramOutput();
+mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=257'});
+assert.soon(function() {
+ return rawMongoProgramOutput().match(
+ "Invalid value for parameter replWriterThreadCount: 257 is not less than or equal to 256");
+}, "mongod started with too high a value for replWriterThreadCount");
- // proper count
- clearRawMongoProgramOutput();
- mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=24'});
- assert.neq(null, mongo, "mongod failed to start with a suitable replWriterThreadCount value");
- assert(!rawMongoProgramOutput().match("Invalid value for parameter replWriterThreadCount"),
- "despite accepting the replWriterThreadCount value, mongod logged an error");
+// proper count
+clearRawMongoProgramOutput();
+mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=24'});
+assert.neq(null, mongo, "mongod failed to start with a suitable replWriterThreadCount value");
+assert(!rawMongoProgramOutput().match("Invalid value for parameter replWriterThreadCount"),
+ "despite accepting the replWriterThreadCount value, mongod logged an error");
- // getParameter to confirm the value was set
- var result = mongo.getDB("admin").runCommand({getParameter: 1, replWriterThreadCount: 1});
- assert.eq(24, result.replWriterThreadCount, "replWriterThreadCount was not set internally");
+// getParameter to confirm the value was set
+var result = mongo.getDB("admin").runCommand({getParameter: 1, replWriterThreadCount: 1});
+assert.eq(24, result.replWriterThreadCount, "replWriterThreadCount was not set internally");
- // setParameter to ensure it is not possible
- assert.commandFailed(
- mongo.getDB("admin").runCommand({setParameter: 1, replWriterThreadCount: 1}));
- MongoRunner.stopMongod(mongo);
+// setParameter to ensure it is not possible
+assert.commandFailed(mongo.getDB("admin").runCommand({setParameter: 1, replWriterThreadCount: 1}));
+MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/replica_set_connection_error_codes.js b/jstests/noPassthrough/replica_set_connection_error_codes.js
index 7deebdfcc27..d431415ee6d 100644
--- a/jstests/noPassthrough/replica_set_connection_error_codes.js
+++ b/jstests/noPassthrough/replica_set_connection_error_codes.js
@@ -4,81 +4,80 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
-
- // Set the refresh period to 10 min to rule out races
- _setShellFailPoint({
- configureFailPoint: "modifyReplicaSetMonitorDefaultRefreshPeriod",
- mode: "alwaysOn",
- data: {
- period: 10 * 60,
- },
- });
-
- const rst = new ReplSetTest({
- nodes: 3,
- nodeOptions: {
- setParameter:
- {"failpoint.respondWithNotPrimaryInCommandDispatch": tojson({mode: "alwaysOn"})}
- }
- });
- rst.startSet();
- rst.initiate();
-
- const directConn = rst.getPrimary();
- const rsConn = new Mongo(rst.getURL());
- assert(rsConn.isReplicaSetConnection(),
- "expected " + rsConn.host + " to be a replica set connection string");
-
- const[secondary1, secondary2] = rst.getSecondaries();
-
- function stepDownPrimary(rst) {
- const awaitShell = startParallelShell(
- () => assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true})),
- directConn.port);
-
- // We wait for the primary to transition to the SECONDARY state to ensure we're waiting
- // until after the parallel shell has started the replSetStepDown command and the server is
- // paused at the failpoint.Do not attempt to reconnect to the node, since the node will be
- // holding the global X lock at the failpoint.
- const reconnectNode = false;
- rst.waitForState(directConn, ReplSetTest.State.SECONDARY, null, reconnectNode);
-
- return awaitShell;
+"use strict";
+
+// Set the refresh period to 10 min to rule out races
+_setShellFailPoint({
+ configureFailPoint: "modifyReplicaSetMonitorDefaultRefreshPeriod",
+ mode: "alwaysOn",
+ data: {
+ period: 10 * 60,
+ },
+});
+
+const rst = new ReplSetTest({
+ nodes: 3,
+ nodeOptions: {
+ setParameter:
+ {"failpoint.respondWithNotPrimaryInCommandDispatch": tojson({mode: "alwaysOn"})}
}
-
- const failpoint = "stepdownHangBeforePerformingPostMemberStateUpdateActions";
- assert.commandWorked(
- directConn.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
-
- const awaitShell = stepDownPrimary(rst);
-
- // Wait for a new primary to be elected and agreed upon by nodes.
- rst.getPrimary();
- rst.awaitNodesAgreeOnPrimary();
-
- // DBClientRS will continue to send command requests to the node it believed to be primary even
- // after it stepped down so long as it hasn't closed its connection.
- assert.commandFailedWithCode(rsConn.getDB("test").runCommand({create: "mycoll"}),
- ErrorCodes.NotMaster);
-
- // However, once the server responds back with a ErrorCodes.NotMaster error, DBClientRS will
- // cause the ReplicaSetMonitor to attempt to discover the current primary.
- assert.commandWorked(rsConn.getDB("test").runCommand({create: "mycoll"}));
-
- try {
- assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- } catch (e) {
- if (!isNetworkError(e)) {
- throw e;
- }
-
- // We ignore network errors because it's possible that depending on how quickly the server
- // closes connections that the connection would get closed before the server has a chance to
- // respond to the configureFailPoint command with ok=1.
+});
+rst.startSet();
+rst.initiate();
+
+const directConn = rst.getPrimary();
+const rsConn = new Mongo(rst.getURL());
+assert(rsConn.isReplicaSetConnection(),
+ "expected " + rsConn.host + " to be a replica set connection string");
+
+const [secondary1, secondary2] = rst.getSecondaries();
+
+function stepDownPrimary(rst) {
+ const awaitShell = startParallelShell(
+ () => assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true})),
+ directConn.port);
+
+ // We wait for the primary to transition to the SECONDARY state to ensure we're waiting
+ // until after the parallel shell has started the replSetStepDown command and the server is
+    // paused at the failpoint. Do not attempt to reconnect to the node, since the node will be
+ // holding the global X lock at the failpoint.
+ const reconnectNode = false;
+ rst.waitForState(directConn, ReplSetTest.State.SECONDARY, null, reconnectNode);
+
+ return awaitShell;
+}
+
+const failpoint = "stepdownHangBeforePerformingPostMemberStateUpdateActions";
+assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+
+const awaitShell = stepDownPrimary(rst);
+
+// Wait for a new primary to be elected and agreed upon by nodes.
+rst.getPrimary();
+rst.awaitNodesAgreeOnPrimary();
+
+// DBClientRS will continue to send command requests to the node it believed to be primary even
+// after it stepped down so long as it hasn't closed its connection.
+assert.commandFailedWithCode(rsConn.getDB("test").runCommand({create: "mycoll"}),
+ ErrorCodes.NotMaster);
+
+// However, once the server responds with an ErrorCodes.NotMaster error, DBClientRS will
+// cause the ReplicaSetMonitor to attempt to discover the current primary.
+assert.commandWorked(rsConn.getDB("test").runCommand({create: "mycoll"}));
+
+try {
+ assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+} catch (e) {
+ if (!isNetworkError(e)) {
+ throw e;
}
- awaitShell();
+    // We ignore network errors because, depending on how quickly the server closes connections,
+    // the connection may be closed before the server has a chance to respond to the
+    // configureFailPoint command with ok=1.
+}
+
+awaitShell();
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/replica_set_connection_getmore.js b/jstests/noPassthrough/replica_set_connection_getmore.js
index acc1d7e31c3..e7167fbd5eb 100644
--- a/jstests/noPassthrough/replica_set_connection_getmore.js
+++ b/jstests/noPassthrough/replica_set_connection_getmore.js
@@ -4,44 +4,44 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const dbName = "test";
- const collName = "getmore";
-
- // We create our own replica set connection because 'rst.nodes' is an array of direct
- // connections to each individual node.
- var conn = new Mongo(rst.getURL());
-
- // We force a read mode of "compatibility" so that we can test Mongo.prototype.readMode()
- // resolves to "commands" independently of the --readMode passed to the mongo shell running this
- // test.
- conn.forceReadMode("compatibility");
- assert.eq("commands",
- conn.readMode(),
- "replica set connections created by the mongo shell should use 'commands' read mode");
- var coll = conn.getDB(dbName)[collName];
- coll.drop();
-
- // Insert several document so that we can use a cursor to fetch them in multiple batches.
- var res = coll.insert([{}, {}, {}, {}, {}]);
- assert.writeOK(res);
- assert.eq(5, res.nInserted);
-
- // Wait for the secondary to catch up because we're going to try and do reads from it.
- rst.awaitReplication();
-
- // Establish a cursor on the secondary and verify that the getMore operations are routed to it.
- var cursor = coll.find().readPref("secondary").batchSize(2);
- assert.eq(5, cursor.itcount(), "failed to read the documents from the secondary");
-
- // Verify that queries work when the read mode is forced to "legacy" reads.
- conn.forceReadMode("legacy");
- var cursor = coll.find().readPref("secondary").batchSize(2);
- assert.eq(5, cursor.itcount(), "failed to read the documents from the secondary");
-
- rst.stopSet();
+"use strict";
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const dbName = "test";
+const collName = "getmore";
+
+// We create our own replica set connection because 'rst.nodes' is an array of direct
+// connections to each individual node.
+var conn = new Mongo(rst.getURL());
+
+// We force a read mode of "compatibility" so that we can test Mongo.prototype.readMode()
+// resolves to "commands" independently of the --readMode passed to the mongo shell running this
+// test.
+conn.forceReadMode("compatibility");
+assert.eq("commands",
+ conn.readMode(),
+ "replica set connections created by the mongo shell should use 'commands' read mode");
+var coll = conn.getDB(dbName)[collName];
+coll.drop();
+
+// Insert several documents so that we can use a cursor to fetch them in multiple batches.
+var res = coll.insert([{}, {}, {}, {}, {}]);
+assert.writeOK(res);
+assert.eq(5, res.nInserted);
+
+// Wait for the secondary to catch up because we're going to try and do reads from it.
+rst.awaitReplication();
+
+// Establish a cursor on the secondary and verify that the getMore operations are routed to it.
+var cursor = coll.find().readPref("secondary").batchSize(2);
+assert.eq(5, cursor.itcount(), "failed to read the documents from the secondary");
+
+// Verify that queries work when the read mode is forced to "legacy" reads.
+conn.forceReadMode("legacy");
+var cursor = coll.find().readPref("secondary").batchSize(2);
+assert.eq(5, cursor.itcount(), "failed to read the documents from the secondary");
+
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/replica_set_connection_stepdown.js b/jstests/noPassthrough/replica_set_connection_stepdown.js
index ab11d72d465..15fee060876 100644
--- a/jstests/noPassthrough/replica_set_connection_stepdown.js
+++ b/jstests/noPassthrough/replica_set_connection_stepdown.js
@@ -4,67 +4,66 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const directConn = rst.getPrimary();
- const rsConn = new Mongo(rst.getURL());
- assert(rsConn.isReplicaSetConnection(),
- "expected " + rsConn.host + " to be a replica set connection string");
+const directConn = rst.getPrimary();
+const rsConn = new Mongo(rst.getURL());
+assert(rsConn.isReplicaSetConnection(),
+ "expected " + rsConn.host + " to be a replica set connection string");
- function stepDownPrimary(rst) {
- const awaitShell = startParallelShell(
- () => assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true})),
- directConn.port);
+function stepDownPrimary(rst) {
+ const awaitShell = startParallelShell(
+ () => assert.commandWorked(db.adminCommand({replSetStepDown: 60, force: true})),
+ directConn.port);
- // We wait for the primary to transition to the SECONDARY state to ensure we're waiting
- // until after the parallel shell has started the replSetStepDown command and the server is
- // paused at the failpoint. Do not attempt to reconnect to the node, since the node will be
- // holding the global X lock at the failpoint.
- const reconnectNode = false;
- rst.waitForState(directConn, ReplSetTest.State.SECONDARY, null, reconnectNode);
+ // We wait for the primary to transition to the SECONDARY state to ensure we're waiting
+ // until after the parallel shell has started the replSetStepDown command and the server is
+ // paused at the failpoint. Do not attempt to reconnect to the node, since the node will be
+ // holding the global X lock at the failpoint.
+ const reconnectNode = false;
+ rst.waitForState(directConn, ReplSetTest.State.SECONDARY, null, reconnectNode);
- return awaitShell;
- }
-
- const failpoint = "stepdownHangBeforePerformingPostMemberStateUpdateActions";
- assert.commandWorked(
- directConn.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+ return awaitShell;
+}
- const awaitShell = stepDownPrimary(rst);
+const failpoint = "stepdownHangBeforePerformingPostMemberStateUpdateActions";
+assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
- const error = assert.throws(function() {
- // DBClientRS will continue to send command requests to the node it believed to be primary
- // even after it stepped down so long as it hasn't closed its connection. But this may also
- // throw if the ReplicaSetMonitor's backgroud refresh has already noticed that this node is
- // no longer primary.
- assert.commandFailedWithCode(rsConn.getDB("test").runCommand({find: "mycoll"}),
- ErrorCodes.NotMasterNoSlaveOk);
+const awaitShell = stepDownPrimary(rst);
- // However, once the server responds back with a "not master" error, DBClientRS will cause
- // the ReplicaSetMonitor to attempt to discover the current primary, which will cause this
- // to definitely throw.
- rsConn.getDB("test").runCommand({find: "mycoll"});
- });
- assert(/Could not find host/.test(error.toString()),
- "find command failed for a reason other than being unable to discover a new primary: " +
- tojson(error));
+const error = assert.throws(function() {
+ // DBClientRS will continue to send command requests to the node it believed to be primary
+ // even after it stepped down so long as it hasn't closed its connection. But this may also
+    // throw if the ReplicaSetMonitor's background refresh has already noticed that this node is
+ // no longer primary.
+ assert.commandFailedWithCode(rsConn.getDB("test").runCommand({find: "mycoll"}),
+ ErrorCodes.NotMasterNoSlaveOk);
- try {
- assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- } catch (e) {
- if (!isNetworkError(e)) {
- throw e;
- }
+ // However, once the server responds back with a "not master" error, DBClientRS will cause
+ // the ReplicaSetMonitor to attempt to discover the current primary, which will cause this
+ // to definitely throw.
+ rsConn.getDB("test").runCommand({find: "mycoll"});
+});
+assert(/Could not find host/.test(error.toString()),
+ "find command failed for a reason other than being unable to discover a new primary: " +
+ tojson(error));
- // We ignore network errors because it's possible that depending on how quickly the server
- // closes connections that the connection would get closed before the server has a chance to
- // respond to the configureFailPoint command with ok=1.
+try {
+ assert.commandWorked(directConn.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+} catch (e) {
+ if (!isNetworkError(e)) {
+ throw e;
}
- awaitShell();
- rst.stopSet();
+    // We ignore network errors because, depending on how quickly the server closes connections,
+    // the connection may be closed before the server has a chance to respond to the
+    // configureFailPoint command with ok=1.
+}
+
+awaitShell();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/report_post_batch_resume_token_mongod.js b/jstests/noPassthrough/report_post_batch_resume_token_mongod.js
index cf7dd55b1d0..389151169a6 100644
--- a/jstests/noPassthrough/report_post_batch_resume_token_mongod.js
+++ b/jstests/noPassthrough/report_post_batch_resume_token_mongod.js
@@ -3,112 +3,112 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- // Create a new single-node replica set, and ensure that it can support $changeStream.
- const rst = new ReplSetTest({nodes: 1});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- rst.stopSet();
- return;
- }
- rst.initiate();
-
- const db = rst.getPrimary().getDB(jsTestName());
- const collName = "report_post_batch_resume_token";
- const testCollection = assertDropAndRecreateCollection(db, collName);
- const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
- const adminDB = db.getSiblingDB("admin");
-
- let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
- const batchSize = 2;
-
- // Start watching the test collection in order to capture a resume token.
- let csCursor = testCollection.watch();
-
- // Write some documents to the test collection and get the resume token from the first doc.
- for (let i = 0; i < 5; ++i) {
- assert.commandWorked(testCollection.insert({_id: docId++}));
- }
- const resumeTokenFromDoc = csCursor.next()._id;
- csCursor.close();
-
- // Test that postBatchResumeToken is present on a non-empty initial aggregate batch.
- assert.soon(() => {
- csCursor = testCollection.watch([], {resumeAfter: resumeTokenFromDoc});
- csCursor.close(); // We don't need any results after the initial batch.
- return csCursor.objsLeftInBatch();
- });
- while (csCursor.objsLeftInBatch()) {
- csCursor.next();
- }
- let initialAggPBRT = csCursor.getResumeToken();
- assert.neq(undefined, initialAggPBRT);
-
- // Test that the PBRT is correctly updated when reading events from within a transaction.
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(db.getName());
-
- const sessionColl = sessionDB[testCollection.getName()];
- const sessionOtherColl = sessionDB[otherCollection.getName()];
- session.startTransaction();
-
- // Open a stream of batchSize:2 and grab the PBRT of the initial batch.
- csCursor = testCollection.watch([], {cursor: {batchSize: batchSize}});
- initialAggPBRT = csCursor.getResumeToken();
- assert.eq(csCursor.objsLeftInBatch(), 0);
-
- // Write 3 documents to testCollection and 1 to the unrelated collection within the transaction.
- for (let i = 0; i < 3; ++i) {
- assert.commandWorked(sessionColl.insert({_id: docId++}));
- }
- assert.commandWorked(sessionOtherColl.insert({}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
-
- // Grab the next 2 events, which should be the first 2 events in the transaction.
- assert(csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 2);
-
- // The clusterTime should be the same on each, but the resume token keeps advancing.
- const txnEvent1 = csCursor.next(), txnEvent2 = csCursor.next();
- const txnClusterTime = txnEvent1.clusterTime;
- assert.eq(txnEvent2.clusterTime, txnClusterTime);
- assert.gt(bsonWoCompare(txnEvent1._id, initialAggPBRT), 0);
- assert.gt(bsonWoCompare(txnEvent2._id, txnEvent1._id), 0);
-
- // The PBRT of the first transaction batch is equal to the last document's resumeToken.
- let getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(getMorePBRT, txnEvent2._id), 0);
-
- // Save this PBRT so that we can test resuming from it later on.
- const resumePBRT = getMorePBRT;
-
- // Now get the next batch. This contains the third of the four transaction operations.
- let previousGetMorePBRT = getMorePBRT;
- assert(csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 1);
-
- // The clusterTime of this event is the same as the two events from the previous batch, but its
- // resume token is greater than the previous PBRT.
- const txnEvent3 = csCursor.next();
- assert.eq(txnEvent3.clusterTime, txnClusterTime);
- assert.gt(bsonWoCompare(txnEvent3._id, previousGetMorePBRT), 0);
-
- // Because we wrote to the unrelated collection, the final event in the transaction does not
- // appear in the batch. But in this case it also does not allow our PBRT to advance beyond the
- // last event in the batch, because the unrelated event is within the same transaction and
- // therefore has the same clusterTime.
- getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(getMorePBRT, txnEvent3._id), 0);
-
- // Confirm that resuming from the PBRT of the first batch gives us the third transaction write.
- csCursor = testCollection.watch([], {resumeAfter: resumePBRT});
- assert.docEq(csCursor.next(), txnEvent3);
- assert(!csCursor.hasNext());
+"use strict";
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+// Create a new single-node replica set, and ensure that it can support $changeStream.
+const rst = new ReplSetTest({nodes: 1});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
rst.stopSet();
+ return;
+}
+rst.initiate();
+
+const db = rst.getPrimary().getDB(jsTestName());
+const collName = "report_post_batch_resume_token";
+const testCollection = assertDropAndRecreateCollection(db, collName);
+const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
+const adminDB = db.getSiblingDB("admin");
+
+let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
+const batchSize = 2;
+
+// Start watching the test collection in order to capture a resume token.
+let csCursor = testCollection.watch();
+
+// Write some documents to the test collection and get the resume token from the first doc.
+for (let i = 0; i < 5; ++i) {
+ assert.commandWorked(testCollection.insert({_id: docId++}));
+}
+const resumeTokenFromDoc = csCursor.next()._id;
+csCursor.close();
+
+// Test that postBatchResumeToken is present on a non-empty initial aggregate batch.
+assert.soon(() => {
+ csCursor = testCollection.watch([], {resumeAfter: resumeTokenFromDoc});
+ csCursor.close(); // We don't need any results after the initial batch.
+ return csCursor.objsLeftInBatch();
+});
+while (csCursor.objsLeftInBatch()) {
+ csCursor.next();
+}
+let initialAggPBRT = csCursor.getResumeToken();
+assert.neq(undefined, initialAggPBRT);
+
+// Test that the PBRT is correctly updated when reading events from within a transaction.
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(db.getName());
+
+const sessionColl = sessionDB[testCollection.getName()];
+const sessionOtherColl = sessionDB[otherCollection.getName()];
+session.startTransaction();
+
+// Open a stream of batchSize:2 and grab the PBRT of the initial batch.
+csCursor = testCollection.watch([], {cursor: {batchSize: batchSize}});
+initialAggPBRT = csCursor.getResumeToken();
+assert.eq(csCursor.objsLeftInBatch(), 0);
+
+// Write 3 documents to testCollection and 1 to the unrelated collection within the transaction.
+for (let i = 0; i < 3; ++i) {
+ assert.commandWorked(sessionColl.insert({_id: docId++}));
+}
+assert.commandWorked(sessionOtherColl.insert({}));
+assert.commandWorked(session.commitTransaction_forTesting());
+session.endSession();
+
+// Grab the next 2 events, which should be the first 2 events in the transaction.
+assert(csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 2);
+
+// The clusterTime should be the same on each, but the resume token keeps advancing.
+const txnEvent1 = csCursor.next(), txnEvent2 = csCursor.next();
+const txnClusterTime = txnEvent1.clusterTime;
+assert.eq(txnEvent2.clusterTime, txnClusterTime);
+assert.gt(bsonWoCompare(txnEvent1._id, initialAggPBRT), 0);
+assert.gt(bsonWoCompare(txnEvent2._id, txnEvent1._id), 0);
+
+// The PBRT of the first transaction batch is equal to the last document's resumeToken.
+let getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(getMorePBRT, txnEvent2._id), 0);
+
+// Save this PBRT so that we can test resuming from it later on.
+const resumePBRT = getMorePBRT;
+
+// Now get the next batch. This contains the third of the four transaction operations.
+let previousGetMorePBRT = getMorePBRT;
+assert(csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 1);
+
+// The clusterTime of this event is the same as the two events from the previous batch, but its
+// resume token is greater than the previous PBRT.
+const txnEvent3 = csCursor.next();
+assert.eq(txnEvent3.clusterTime, txnClusterTime);
+assert.gt(bsonWoCompare(txnEvent3._id, previousGetMorePBRT), 0);
+
+// Because the final operation in the transaction was a write to the unrelated collection, that
+// event does not appear in the batch. Nor does it allow our PBRT to advance beyond the last
+// event in the batch, because the unrelated event occurred within the same transaction and
+// therefore has the same clusterTime.
+getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(getMorePBRT, txnEvent3._id), 0);
+
+// Confirm that resuming from the PBRT of the first batch gives us the third transaction write.
+csCursor = testCollection.watch([], {resumeAfter: resumePBRT});
+assert.docEq(csCursor.next(), txnEvent3);
+assert(!csCursor.hasNext());
+
+rst.stopSet();
})();
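
The test above drives the postBatchResumeToken (PBRT) through transactions and batch boundaries. As a point of reference, a minimal standalone sketch of the same shell API, assuming a connection to a replica-set member and an illustrative "test.events" collection (neither is part of the patch), looks like this:

// Minimal sketch; connection string and collection name are illustrative.
const conn = new Mongo("localhost:27017");               // assumes a replica-set member
const events = conn.getDB("test").getCollection("events");
const cursor = events.watch([], {cursor: {batchSize: 2}});
assert.commandWorked(events.insert({msg: "hello"}));
assert.soon(() => cursor.hasNext());                      // getMore returns the event and a fresh PBRT
const event = cursor.next();
const pbrt = cursor.getResumeToken();                     // highest token the server has scanned so far
const resumed = events.watch([], {resumeAfter: pbrt});    // picks up strictly after that point

Resuming from the PBRT rather than from an individual event's _id is what lets a consumer restart even when the last batch it received contained no events.
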
diff --git a/jstests/noPassthrough/restart_catalog_preserves_min_visible.js b/jstests/noPassthrough/restart_catalog_preserves_min_visible.js
index 4020873089d..18127ce27d1 100644
--- a/jstests/noPassthrough/restart_catalog_preserves_min_visible.js
+++ b/jstests/noPassthrough/restart_catalog_preserves_min_visible.js
@@ -12,33 +12,34 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- let replSet = new ReplSetTest({name: "server35317", nodes: 1});
- replSet.startSet();
- replSet.initiate();
+let replSet = new ReplSetTest({name: "server35317", nodes: 1});
+replSet.startSet();
+replSet.initiate();
- let prim = replSet.getPrimary();
- let beforeIndexBuild = assert.commandWorked(prim.adminCommand(
- {configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn"}))["operationTime"];
- assert.commandWorked(prim.getDB("test").coll.insert({c: 1}));
- assert.commandWorked(prim.getDB("test").coll.createIndex({c: 1}));
- assert.commandWorked(prim.adminCommand({restartCatalog: 1}));
+let prim = replSet.getPrimary();
+let beforeIndexBuild = assert.commandWorked(prim.adminCommand({
+ configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
+ mode: "alwaysOn"
+}))["operationTime"];
+assert.commandWorked(prim.getDB("test").coll.insert({c: 1}));
+assert.commandWorked(prim.getDB("test").coll.createIndex({c: 1}));
+assert.commandWorked(prim.adminCommand({restartCatalog: 1}));
- let session = prim.startSession({causalConsistency: false});
- let sessionDb = session.getDatabase("test");
- // Prior to fixing SERVER-35317, this would crash a debug build, or return success on a
- // non-debug build. Now it should return an error. Specifically, this fails because we're
- // trying to read behind the minimum visible snapshot timestamp for the `test.coll`
- // collection.
- assert.commandFailed(sessionDb.runCommand({
- find: "coll",
- filter: {c: 1},
- readConcern: {level: "snapshot", atClusterTime: beforeIndexBuild},
- txnNumber: NumberLong(0)
- }));
+let session = prim.startSession({causalConsistency: false});
+let sessionDb = session.getDatabase("test");
+// Prior to fixing SERVER-35317, this would crash a debug build, or return success on a
+// non-debug build. Now it should return an error. Specifically, this fails because we're
+// trying to read behind the minimum visible snapshot timestamp for the `test.coll`
+// collection.
+assert.commandFailed(sessionDb.runCommand({
+ find: "coll",
+ filter: {c: 1},
+ readConcern: {level: "snapshot", atClusterTime: beforeIndexBuild},
+ txnNumber: NumberLong(0)
+}));
- session.endSession();
- replSet.stopSet();
+session.endSession();
+replSet.stopSet();
})();
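
For context, the rejected read above is a readConcern "snapshot" read pinned to an explicit atClusterTime. A hedged sketch of the same command shape, with an operationTime captured from an arbitrary command (database "test" and collection "coll" mirror the test; whether the read succeeds depends on the collection's minimum visible snapshot):

// Sketch only; replica-set members attach an operationTime to command replies.
const session = db.getMongo().startSession({causalConsistency: false});
const sessionDb = session.getDatabase("test");
const opTime = assert.commandWorked(db.adminCommand({ping: 1})).operationTime;
const res = sessionDb.runCommand({
    find: "coll",
    filter: {},
    readConcern: {level: "snapshot", atClusterTime: opTime},
    txnNumber: NumberLong(0)
});
// A timestamp behind the collection's minimum visible snapshot is rejected, as the test asserts.
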
diff --git a/jstests/noPassthrough/restart_catalog_sharded_cluster.js b/jstests/noPassthrough/restart_catalog_sharded_cluster.js
index 696d62c2af8..782fa9aa913 100644
--- a/jstests/noPassthrough/restart_catalog_sharded_cluster.js
+++ b/jstests/noPassthrough/restart_catalog_sharded_cluster.js
@@ -3,211 +3,213 @@
* @tags: [requires_replication, requires_sharding, requires_majority_read_concern]
*/
(function() {
- "use strict";
-
- // Only run this test if the storage engine is "wiredTiger" or "inMemory".
- const acceptedStorageEngines = ["wiredTiger", "inMemory"];
- const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger";
- if (!acceptedStorageEngines.includes(currentStorageEngine)) {
- jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine +
- " storage engine");
- return;
+"use strict";
+
+// Only run this test if the storage engine is "wiredTiger" or "inMemory".
+const acceptedStorageEngines = ["wiredTiger", "inMemory"];
+const currentStorageEngine = jsTest.options().storageEngine || "wiredTiger";
+if (!acceptedStorageEngines.includes(currentStorageEngine)) {
+ jsTest.log("Refusing to run restartCatalog test on " + currentStorageEngine +
+ " storage engine");
+ return;
+}
+
+// Helper function for sorting documents in JavaScript.
+function sortOn(fieldName) {
+ return (doc1, doc2) => {
+ return bsonWoCompare({_: doc1[fieldName]}, {_: doc2[fieldName]});
+ };
+}
+
+const st = new ShardingTest({
+ name: "restart_catalog_sharded_cluster",
+ mongos: 1,
+ config: 1,
+ shards: {
+ rs: true,
+ rs0: {nodes: 1},
+ rs1: {nodes: 1},
+ },
+ other: {
+ enableBalancer: false,
+ configOptions: {setParameter: "enableTestCommands=1"},
+ shardOptions: {setParameter: "enableTestCommands=1"},
}
-
- // Helper function for sorting documents in JavaScript.
- function sortOn(fieldName) {
- return (doc1, doc2) => {
- return bsonWoCompare({_: doc1[fieldName]}, {_: doc2[fieldName]});
- };
- }
-
- const st = new ShardingTest({
- name: "restart_catalog_sharded_cluster",
- mongos: 1,
- config: 1,
- shards: {
- rs: true,
- rs0: {nodes: 1},
- rs1: {nodes: 1},
- },
- other: {
- enableBalancer: false,
- configOptions: {setParameter: "enableTestCommands=1"},
- shardOptions: {setParameter: "enableTestCommands=1"},
- }
- });
- const mongos = st.s0;
- const shard0 = st.shard0;
- const shard1 = st.shard1;
-
- const dbName = "drinks";
-
- // Create a sharded collection and distribute chunks amongst the shards.
- const coffees = [
- {_id: "americano", price: 1.5},
- {_id: "espresso", price: 2.0},
- {_id: "starbucks", price: 1000.0}
- ];
- const coffeeColl = mongos.getDB(dbName).getCollection("coffee");
- assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, shard0.shardName);
- assert.commandWorked(
- mongos.adminCommand({shardCollection: coffeeColl.getFullName(), key: {price: 1}}));
- const splitPoint = 50.0;
- assert.commandWorked(
- mongos.adminCommand({split: coffeeColl.getFullName(), middle: {price: splitPoint}}));
- for (let coffee of coffees) {
- assert.commandWorked(coffeeColl.insert(coffee, {writeConcern: {w: "majority"}}));
- }
- assert.commandWorked(mongos.adminCommand({
- moveChunk: coffeeColl.getFullName(),
- find: {price: 1000.0},
- to: shard1.shardName,
- _waitForDelete: true
- }));
- assert.commandWorked(mongos.adminCommand({
- moveChunk: coffeeColl.getFullName(),
- find: {price: 0.0},
- to: shard0.shardName,
- _waitForDelete: true
- }));
-
- // Create an unsharded collection and throw some data in.
- const teaColl = mongos.getDB(dbName).getCollection("tea");
- const teas = [
- {_id: "darjeeling", price: 2.0},
- {_id: "earl gray", price: 1.5},
- {_id: "sencha", price: 3.5}
- ];
- for (let tea of teas) {
- assert.commandWorked(teaColl.insert(tea, {writeConcern: {w: "majority"}}));
- }
-
- // Run queries on both the sharded and unsharded collection.
- function assertShardsHaveExpectedData() {
- const dbShard0 = shard0.getDB(dbName);
- const dbShard1 = shard1.getDB(dbName);
-
- // Assert that we can find all documents in the unsharded collection by either asking
- // mongos, or consulting the primary shard directly.
- assert.eq(teaColl.find().sort({_id: 1}).readConcern("majority").toArray(),
- teas.sort(sortOn("_id")),
- "couldn't find all unsharded data via mongos");
- assert.eq(dbShard0.tea.find().sort({_id: 1}).toArray(),
- teas.sort(sortOn("_id")),
- "couldn't find all unsharded data directly via primary shard");
- assert.eq(teaColl.find().sort({price: 1}).toArray(), teas.sort(sortOn("price")));
-
- // Assert that we can find all documents in the sharded collection via scatter-gather.
- assert.eq(coffeeColl.find().sort({_id: 1}).readConcern("majority").toArray(),
- coffees.sort(sortOn("_id")),
- "couldn't find all sharded data via mongos scatter-gather");
-
- // Assert that we can find all documents via a query that targets multiple shards.
- assert.eq(coffeeColl.find({price: {$gt: 0}}).sort({price: 1}).toArray(),
- coffees.sort(sortOn("price")),
- "couldn't find all sharded data via mongos multi-shard targeted query");
-
- // Assert that we can find all sharded documents on shard0 by shard targeting via mongos,
- // and by consulting shard0 directly.
- const dataShard0 = coffees.filter(drink => drink.price < splitPoint).sort(sortOn("_id"));
- assert.eq(coffeeColl.find({price: {$lt: splitPoint}}).sort({_id: 1}).toArray(),
- dataShard0,
- "couldn't find shard0 data via targeting through mongos");
- jsTest.log(tojson(dbShard0.getCollectionInfos()));
- assert.eq(dbShard0.coffee.find().toArray(),
- dataShard0,
- "couldn't find shard0 data by directly asking shard0");
-
- // Assert that we can find all sharded documents on shard1 by shard targeting via mongos,
- // and by consulting shard1 directly.
- const dataShard1 = coffees.filter(drink => drink.price >= splitPoint).sort(sortOn("_id"));
- assert.eq(coffeeColl.find({price: {$gte: splitPoint}}).sort({_id: 1}).toArray(),
- dataShard1,
- "couldn't find shard1 data via targeting through mongos");
- assert.eq(dbShard1.coffee.find().toArray(),
- dataShard1,
- "couldn't find shard1 data by directly asking shard1");
- }
- assertShardsHaveExpectedData();
-
- // Run queries on the metadata stored in the config servers.
- function assertConfigServersHaveExpectedData() {
- const configDBViaMongos = mongos.getDB("config");
- const configDBViaConfigSvr = st.config0.getDB("config");
- const projectOnlyShard = {_id: 0, shard: 1};
-
- // Assert that we can find documents for chunk metadata, both via mongos and by asking the
- // config server primary directly.
- const smallestChunk = {"max.price": splitPoint};
- const smallestChunkShard = {shard: "restart_catalog_sharded_cluster-rs0"};
- assert.eq(configDBViaMongos.chunks.find(smallestChunk, projectOnlyShard).toArray(),
- [smallestChunkShard]);
- assert.eq(configDBViaConfigSvr.chunks.find(smallestChunk, projectOnlyShard).toArray(),
- [smallestChunkShard]);
-
- const largestChunk = {"min.price": splitPoint};
- const largestChunkShard = {shard: "restart_catalog_sharded_cluster-rs1"};
- assert.eq(configDBViaMongos.chunks.find(largestChunk, projectOnlyShard).toArray(),
- [largestChunkShard]);
- assert.eq(configDBViaConfigSvr.chunks.find(largestChunk, projectOnlyShard).toArray(),
- [largestChunkShard]);
- }
- assertConfigServersHaveExpectedData();
-
- // Restart the catalog on the config server primary, then assert that both collection data and
- // sharding metadata are as expected.
- assert.commandWorked(st.config0.getDB("admin").runCommand({restartCatalog: 1}));
- assertConfigServersHaveExpectedData();
- assertShardsHaveExpectedData();
-
- // Remember what indexes are present, then restart the catalog on all shards via mongos.
- const teaIndexesBeforeRestart = teaColl.getIndexes().sort(sortOn("_id"));
- const coffeeIndexesBeforeRestart = coffeeColl.getIndexes().sort(sortOn("_id"));
- assert.commandWorked(mongos.adminCommand({restartCatalog: 1}));
-
- // Verify that the data in the collections and the metadata have not changed.
- assertConfigServersHaveExpectedData();
- assertShardsHaveExpectedData();
-
- // Verify that both the sharded and unsharded collection have the same indexes as prior to the
- // restart.
- const teaIndexesAfterRestart = teaColl.getIndexes().sort(sortOn("_id"));
- assert.eq(teaIndexesBeforeRestart, teaIndexesAfterRestart);
- const coffeeIndexesAfterRestart = coffeeColl.getIndexes().sort(sortOn("_id"));
- assert.eq(coffeeIndexesBeforeRestart, coffeeIndexesAfterRestart);
-
- // Create new indexes on both collections and verify that queries return the same results.
- [teaColl, coffeeColl].forEach(coll => {
- assert.commandWorked(coll.createIndex({price: -1}));
- assert.commandWorked(coll.createIndex({price: 1, _id: 1}));
- });
- assertShardsHaveExpectedData();
-
- // Modify the existing collections.
- const validator = {price: {$gt: 0}};
- [teaColl, coffeeColl].forEach(coll => {
- assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
- assert.writeErrorWithCode(coll.insert({price: -1}), ErrorCodes.DocumentValidationFailure);
- });
-
- // Perform another write, implicitly creating a new collection and database.
- const secondTestDB = mongos.getDB("restart_catalog_sharded_cluster_2");
- const foodColl = secondTestDB.getCollection("food");
- const doc = {_id: "apple", category: "fruit"};
- assert.commandWorked(foodColl.insert(doc));
- assert.commandWorked(foodColl.createIndex({category: 1}));
- assert.eq(foodColl.find().toArray(), [doc]);
-
- // Shard the new collection and verify we can find its data again.
- assert.commandWorked(mongos.adminCommand({enableSharding: secondTestDB.getName()}));
- assert.commandWorked(
- mongos.adminCommand({shardCollection: foodColl.getFullName(), key: {category: 1}}));
- assert.eq(foodColl.find().toArray(), [doc]);
-
- // Build a new index on the new collection.
- assert.commandWorked(foodColl.createIndex({category: -1}));
- assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]);
-
- st.stop();
+});
+const mongos = st.s0;
+const shard0 = st.shard0;
+const shard1 = st.shard1;
+
+const dbName = "drinks";
+
+// Create a sharded collection and distribute chunks amongst the shards.
+const coffees = [
+ {_id: "americano", price: 1.5},
+ {_id: "espresso", price: 2.0},
+ {_id: "starbucks", price: 1000.0}
+];
+const coffeeColl = mongos.getDB(dbName).getCollection("coffee");
+assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, shard0.shardName);
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: coffeeColl.getFullName(), key: {price: 1}}));
+const splitPoint = 50.0;
+assert.commandWorked(
+ mongos.adminCommand({split: coffeeColl.getFullName(), middle: {price: splitPoint}}));
+for (let coffee of coffees) {
+ assert.commandWorked(coffeeColl.insert(coffee, {writeConcern: {w: "majority"}}));
+}
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: coffeeColl.getFullName(),
+ find: {price: 1000.0},
+ to: shard1.shardName,
+ _waitForDelete: true
+}));
+assert.commandWorked(mongos.adminCommand({
+ moveChunk: coffeeColl.getFullName(),
+ find: {price: 0.0},
+ to: shard0.shardName,
+ _waitForDelete: true
+}));
+
+// Create an unsharded collection and throw some data in.
+const teaColl = mongos.getDB(dbName).getCollection("tea");
+const teas =
+ [{_id: "darjeeling", price: 2.0}, {_id: "earl gray", price: 1.5}, {_id: "sencha", price: 3.5}];
+for (let tea of teas) {
+ assert.commandWorked(teaColl.insert(tea, {writeConcern: {w: "majority"}}));
+}
+
+// Run queries on both the sharded and unsharded collection.
+function assertShardsHaveExpectedData() {
+ const dbShard0 = shard0.getDB(dbName);
+ const dbShard1 = shard1.getDB(dbName);
+
+ // Assert that we can find all documents in the unsharded collection by either asking
+ // mongos, or consulting the primary shard directly.
+ assert.eq(teaColl.find().sort({_id: 1}).readConcern("majority").toArray(),
+ teas.sort(sortOn("_id")),
+ "couldn't find all unsharded data via mongos");
+ assert.eq(dbShard0.tea.find().sort({_id: 1}).toArray(),
+ teas.sort(sortOn("_id")),
+ "couldn't find all unsharded data directly via primary shard");
+ assert.eq(teaColl.find().sort({price: 1}).toArray(), teas.sort(sortOn("price")));
+
+ // Assert that we can find all documents in the sharded collection via scatter-gather.
+ assert.eq(coffeeColl.find().sort({_id: 1}).readConcern("majority").toArray(),
+ coffees.sort(sortOn("_id")),
+ "couldn't find all sharded data via mongos scatter-gather");
+
+ // Assert that we can find all documents via a query that targets multiple shards.
+ assert.eq(coffeeColl.find({price: {$gt: 0}}).sort({price: 1}).toArray(),
+ coffees.sort(sortOn("price")),
+ "couldn't find all sharded data via mongos multi-shard targeted query");
+
+ // Assert that we can find all sharded documents on shard0 by shard targeting via mongos,
+ // and by consulting shard0 directly.
+ const dataShard0 = coffees.filter(drink => drink.price < splitPoint).sort(sortOn("_id"));
+ assert.eq(coffeeColl.find({price: {$lt: splitPoint}}).sort({_id: 1}).toArray(),
+ dataShard0,
+ "couldn't find shard0 data via targeting through mongos");
+ jsTest.log(tojson(dbShard0.getCollectionInfos()));
+ assert.eq(dbShard0.coffee.find().toArray(),
+ dataShard0,
+ "couldn't find shard0 data by directly asking shard0");
+
+ // Assert that we can find all sharded documents on shard1 by shard targeting via mongos,
+ // and by consulting shard1 directly.
+ const dataShard1 = coffees.filter(drink => drink.price >= splitPoint).sort(sortOn("_id"));
+ assert.eq(coffeeColl.find({price: {$gte: splitPoint}}).sort({_id: 1}).toArray(),
+ dataShard1,
+ "couldn't find shard1 data via targeting through mongos");
+ assert.eq(dbShard1.coffee.find().toArray(),
+ dataShard1,
+ "couldn't find shard1 data by directly asking shard1");
+}
+assertShardsHaveExpectedData();
+
+// Run queries on the metadata stored in the config servers.
+function assertConfigServersHaveExpectedData() {
+ const configDBViaMongos = mongos.getDB("config");
+ const configDBViaConfigSvr = st.config0.getDB("config");
+ const projectOnlyShard = {_id: 0, shard: 1};
+
+ // Assert that we can find documents for chunk metadata, both via mongos and by asking the
+ // config server primary directly.
+ const smallestChunk = {"max.price": splitPoint};
+ const smallestChunkShard = {shard: "restart_catalog_sharded_cluster-rs0"};
+ assert.eq(configDBViaMongos.chunks.find(smallestChunk, projectOnlyShard).toArray(),
+ [smallestChunkShard]);
+ assert.eq(configDBViaConfigSvr.chunks.find(smallestChunk, projectOnlyShard).toArray(),
+ [smallestChunkShard]);
+
+ const largestChunk = {"min.price": splitPoint};
+ const largestChunkShard = {shard: "restart_catalog_sharded_cluster-rs1"};
+ assert.eq(configDBViaMongos.chunks.find(largestChunk, projectOnlyShard).toArray(),
+ [largestChunkShard]);
+ assert.eq(configDBViaConfigSvr.chunks.find(largestChunk, projectOnlyShard).toArray(),
+ [largestChunkShard]);
+}
+assertConfigServersHaveExpectedData();
+
+// Restart the catalog on the config server primary, then assert that both collection data and
+// sharding metadata are as expected.
+assert.commandWorked(st.config0.getDB("admin").runCommand({restartCatalog: 1}));
+assertConfigServersHaveExpectedData();
+assertShardsHaveExpectedData();
+
+// Remember what indexes are present, then restart the catalog on all shards via mongos.
+const teaIndexesBeforeRestart = teaColl.getIndexes().sort(sortOn("_id"));
+const coffeeIndexesBeforeRestart = coffeeColl.getIndexes().sort(sortOn("_id"));
+assert.commandWorked(mongos.adminCommand({restartCatalog: 1}));
+
+// Verify that the data in the collections and the metadata have not changed.
+assertConfigServersHaveExpectedData();
+assertShardsHaveExpectedData();
+
+// Verify that both the sharded and unsharded collection have the same indexes as prior to the
+// restart.
+const teaIndexesAfterRestart = teaColl.getIndexes().sort(sortOn("_id"));
+assert.eq(teaIndexesBeforeRestart, teaIndexesAfterRestart);
+const coffeeIndexesAfterRestart = coffeeColl.getIndexes().sort(sortOn("_id"));
+assert.eq(coffeeIndexesBeforeRestart, coffeeIndexesAfterRestart);
+
+// Create new indexes on both collections and verify that queries return the same results.
+[teaColl, coffeeColl].forEach(coll => {
+ assert.commandWorked(coll.createIndex({price: -1}));
+ assert.commandWorked(coll.createIndex({price: 1, _id: 1}));
+});
+assertShardsHaveExpectedData();
+
+// Modify the existing collections.
+const validator = {
+ price: {$gt: 0}
+};
+[teaColl, coffeeColl].forEach(coll => {
+ assert.commandWorked(coll.runCommand("collMod", {validator: validator}));
+ assert.writeErrorWithCode(coll.insert({price: -1}), ErrorCodes.DocumentValidationFailure);
+});
+
+// Perform another write, implicitly creating a new collection and database.
+const secondTestDB = mongos.getDB("restart_catalog_sharded_cluster_2");
+const foodColl = secondTestDB.getCollection("food");
+const doc = {
+ _id: "apple",
+ category: "fruit"
+};
+assert.commandWorked(foodColl.insert(doc));
+assert.commandWorked(foodColl.createIndex({category: 1}));
+assert.eq(foodColl.find().toArray(), [doc]);
+
+// Shard the new collection and verify we can find its data again.
+assert.commandWorked(mongos.adminCommand({enableSharding: secondTestDB.getName()}));
+assert.commandWorked(
+ mongos.adminCommand({shardCollection: foodColl.getFullName(), key: {category: 1}}));
+assert.eq(foodColl.find().toArray(), [doc]);
+
+// Build a new index on the new collection.
+assert.commandWorked(foodColl.createIndex({category: -1}));
+assert.eq(foodColl.find().hint({category: -1}).toArray(), [doc]);
+
+st.stop();
}());
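
restartCatalog, used throughout the test above, is a test-only command, which is why the fixture passes enableTestCommands=1 to every node. A hedged single-node sketch of the invariant the test checks (collection names here are illustrative, and enableTestCommands is assumed to be on, as it is by default under the test runner):

// Sketch: data and indexes are expected to survive a catalog close/reopen.
const conn = MongoRunner.runMongod();
const sketchDB = conn.getDB("restart_catalog_sketch");
assert.commandWorked(sketchDB.drinks.insert({_id: "americano", price: 1.5}));
assert.commandWorked(sketchDB.drinks.createIndex({price: 1}));
assert.commandWorked(conn.adminCommand({restartCatalog: 1}));  // closes and reopens the catalog
assert.eq(1, sketchDB.drinks.find().itcount());                // data survives the restart
assert.eq(2, sketchDB.drinks.getIndexes().length);             // _id index plus {price: 1}
MongoRunner.stopMongod(conn);
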
diff --git a/jstests/noPassthrough/restart_node_with_bridge.js b/jstests/noPassthrough/restart_node_with_bridge.js
index 004b595a208..e4398f7b13c 100644
--- a/jstests/noPassthrough/restart_node_with_bridge.js
+++ b/jstests/noPassthrough/restart_node_with_bridge.js
@@ -5,60 +5,59 @@
* @tags: [requires_persistence, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js"); // for reconnect
+load("jstests/replsets/rslib.js"); // for reconnect
- const rst = new ReplSetTest({
- nodes: [{}, {rsConfig: {priority: 0, votes: 0}}],
- useBridge: true,
- });
+const rst = new ReplSetTest({
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}],
+ useBridge: true,
+});
- rst.startSet();
- rst.initiate();
- rst.awaitNodesAgreeOnPrimary();
+rst.startSet();
+rst.initiate();
+rst.awaitNodesAgreeOnPrimary();
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
- const primaryDB = primary.getDB("test");
- const primaryColl = primaryDB.getCollection("restart_node_with_bridge");
+const primaryDB = primary.getDB("test");
+const primaryColl = primaryDB.getCollection("restart_node_with_bridge");
- function assertWriteReplicates() {
- assert.commandWorked(primaryColl.update(
- {_id: 0}, {$inc: {counter: 1}}, {upsert: true, writeConcern: {w: 2}}));
- }
+function assertWriteReplicates() {
+ assert.commandWorked(
+ primaryColl.update({_id: 0}, {$inc: {counter: 1}}, {upsert: true, writeConcern: {w: 2}}));
+}
- function assertWriteFailsToReplicate() {
- assert.commandFailedWithCode(
- primaryColl.update(
- {_id: 0}, {$inc: {counter: 1}}, {writeConcern: {w: 2, wtimeout: 1000}}),
- ErrorCodes.WriteConcernFailed);
- }
+function assertWriteFailsToReplicate() {
+ assert.commandFailedWithCode(
+ primaryColl.update({_id: 0}, {$inc: {counter: 1}}, {writeConcern: {w: 2, wtimeout: 1000}}),
+ ErrorCodes.WriteConcernFailed);
+}
- // By default, the primary should be connected to the secondary. Replicating a write should
- // therefore succeed.
- assertWriteReplicates();
+// By default, the primary should be connected to the secondary. Replicating a write should
+// therefore succeed.
+assertWriteReplicates();
- // We disconnect the primary from the secondary and verify that replicating a write fails.
- primary.disconnect(secondary);
- assertWriteFailsToReplicate();
+// We disconnect the primary from the secondary and verify that replicating a write fails.
+primary.disconnect(secondary);
+assertWriteFailsToReplicate();
- // We restart the secondary and verify that replicating a write still fails.
- rst.restart(secondary);
- assertWriteFailsToReplicate();
+// We restart the secondary and verify that replicating a write still fails.
+rst.restart(secondary);
+assertWriteFailsToReplicate();
- // We restart the primary and verify that replicating a write still fails.
- rst.restart(primary);
- rst.getPrimary();
- // Note that we specify 'primaryDB' to avoid having reconnect() send a message directly to the
- // mongod process rather than going through the mongobridge process as well.
- reconnect(primaryDB);
- assertWriteFailsToReplicate();
+// We restart the primary and verify that replicating a write still fails.
+rst.restart(primary);
+rst.getPrimary();
+// Note that we specify 'primaryDB' to avoid having reconnect() send a message directly to the
+// mongod process rather than going through the mongobridge process as well.
+reconnect(primaryDB);
+assertWriteFailsToReplicate();
- // We reconnect the primary to the secondary and verify that replicating a write succeeds.
- primary.reconnect(secondary);
- assertWriteReplicates();
+// We reconnect the primary to the secondary and verify that replicating a write succeeds.
+primary.reconnect(secondary);
+assertWriteReplicates();
- rst.stopSet();
+rst.stopSet();
}());
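
The useBridge option above routes every connection through a mongobridge process, which is what makes disconnect() and reconnect() possible. A condensed sketch of that pattern, reusing only calls that appear in the test (the write concern timeout of 1000 ms is illustrative):

// Sketch of the bridge pattern: a {w: 2} write fails once the secondary is unreachable.
const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}], useBridge: true});
rst.startSet();
rst.initiate();
const primary = rst.getPrimary();
const secondary = rst.getSecondary();
primary.disconnect(secondary);  // drop traffic between the two bridges
assert.commandFailedWithCode(
    primary.getDB("test").c.insert({x: 1}, {writeConcern: {w: 2, wtimeout: 1000}}),
    ErrorCodes.WriteConcernFailed);
primary.reconnect(secondary);   // restore the link; {w: 2} writes succeed again
rst.stopSet();
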
diff --git a/jstests/noPassthrough/retry_network_error_test.js b/jstests/noPassthrough/retry_network_error_test.js
index df8da41a5d4..e8fe4a78047 100644
--- a/jstests/noPassthrough/retry_network_error_test.js
+++ b/jstests/noPassthrough/retry_network_error_test.js
@@ -4,44 +4,43 @@
*/
(function() {
- "use strict";
- let node = MongoRunner.runMongod();
- let hostname = node.host;
+"use strict";
+let node = MongoRunner.runMongod();
+let hostname = node.host;
- jsTestLog("Test connecting to a healthy node.");
- let numRetries = 5;
- let sleepMs = 50;
- let attempts = 0;
+jsTestLog("Test connecting to a healthy node.");
+let numRetries = 5;
+let sleepMs = 50;
+let attempts = 0;
+retryOnNetworkError(function() {
+ attempts++;
+ new Mongo(hostname);
+}, numRetries, sleepMs);
+assert.eq(attempts, 1);
+
+jsTestLog("Test connecting to a node that is down.");
+MongoRunner.stopMongod(node);
+attempts = 0;
+try {
retryOnNetworkError(function() {
attempts++;
new Mongo(hostname);
}, numRetries, sleepMs);
- assert.eq(attempts, 1);
-
- jsTestLog("Test connecting to a node that is down.");
- MongoRunner.stopMongod(node);
- attempts = 0;
- try {
- retryOnNetworkError(function() {
- attempts++;
- new Mongo(hostname);
- }, numRetries, sleepMs);
- } catch (e) {
- jsTestLog("Caught exception after exhausting retries: " + e);
- }
- assert.eq(attempts, numRetries + 1);
-
- jsTestLog("Test connecting to a node with an invalid hostname.");
- let invalidHostname = "very-invalid-host-name";
- attempts = 0;
- try {
- retryOnNetworkError(function() {
- attempts++;
- new Mongo(invalidHostname);
- }, numRetries, sleepMs);
- } catch (e) {
- jsTestLog("Caught exception after exhausting retries: " + e);
- }
- assert.eq(attempts, numRetries + 1);
+} catch (e) {
+ jsTestLog("Caught exception after exhausting retries: " + e);
+}
+assert.eq(attempts, numRetries + 1);
+jsTestLog("Test connecting to a node with an invalid hostname.");
+let invalidHostname = "very-invalid-host-name";
+attempts = 0;
+try {
+ retryOnNetworkError(function() {
+ attempts++;
+ new Mongo(invalidHostname);
+ }, numRetries, sleepMs);
+} catch (e) {
+ jsTestLog("Caught exception after exhausting retries: " + e);
+}
+assert.eq(attempts, numRetries + 1);
}());
\ No newline at end of file
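
retryOnNetworkError is a shell helper; the sketch below is a simplified version of the loop it implements, not the shell's own implementation. isNetworkError() and sleep() are existing shell helpers; everything else is illustrative.

// Simplified retry-on-network-error loop; the callback runs at most numRetries + 1 times.
function retryNetworkOp(fn, numRetries, sleepMs) {
    for (let attempt = 0; attempt <= numRetries; ++attempt) {
        try {
            return fn();
        } catch (e) {
            if (!isNetworkError(e) || attempt === numRetries) {
                throw e;  // non-network error, or retries exhausted
            }
            sleep(sleepMs);
        }
    }
}
// Usage mirroring the test: retryNetworkOp(() => new Mongo(hostname), 5, 50);
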
diff --git a/jstests/noPassthrough/retryable_writes_standalone_api.js b/jstests/noPassthrough/retryable_writes_standalone_api.js
index ff091624358..228a2c8ea99 100644
--- a/jstests/noPassthrough/retryable_writes_standalone_api.js
+++ b/jstests/noPassthrough/retryable_writes_standalone_api.js
@@ -2,24 +2,24 @@
* Verify behavior of retryable write commands on a standalone mongod.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- const standalone = MongoRunner.runMongod();
- const testDB = standalone.getDB("test");
+const standalone = MongoRunner.runMongod();
+const testDB = standalone.getDB("test");
- // Commands sent to standalone nodes are not allowed to have transaction numbers.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {insert: "foo", documents: [{x: 1}], txnNumber: NumberLong(1), lsid: {id: UUID()}}),
- ErrorCodes.IllegalOperation,
- "expected command with transaction number to fail on standalone mongod");
+// Commands sent to standalone nodes are not allowed to have transaction numbers.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {insert: "foo", documents: [{x: 1}], txnNumber: NumberLong(1), lsid: {id: UUID()}}),
+ ErrorCodes.IllegalOperation,
+ "expected command with transaction number to fail on standalone mongod");
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
}());
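
The same command shape is what a retryable write looks like on a topology that does support it. A hedged sketch against a replica-set primary; 'primaryTestDB' is an assumed handle to that primary's "test" database and is not defined in the test:

// Sketch only: 'primaryTestDB' is an assumption, not part of the patch.
const lsid = {id: UUID()};
assert.commandWorked(primaryTestDB.runCommand(
    {insert: "foo", documents: [{x: 1}], lsid: lsid, txnNumber: NumberLong(1)}));
// Re-sending the identical statement with the same lsid and txnNumber is safe: the server
// recognizes the (lsid, txnNumber) pair and replays the recorded result instead of
// inserting a second document.
assert.commandWorked(primaryTestDB.runCommand(
    {insert: "foo", documents: [{x: 1}], lsid: lsid, txnNumber: NumberLong(1)}));
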
diff --git a/jstests/noPassthrough/rollback_wt_cache_full.js b/jstests/noPassthrough/rollback_wt_cache_full.js
index c5d74431310..6ea271b1dba 100644
--- a/jstests/noPassthrough/rollback_wt_cache_full.js
+++ b/jstests/noPassthrough/rollback_wt_cache_full.js
@@ -4,87 +4,89 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/libs/rollback_test.js');
+load('jstests/replsets/libs/rollback_test.js');
- // Use constrained cache size for both data bearing nodes so that it doesn't matter which node
- // RollbackTest selects as the rollback node.
- const nodeOptions = {
- // Don't log slow operations.
- slowms: 30000,
- // Constrain the storage engine cache size to make it easier to fill it up with unflushed
- // modifications.
- // This test uses a smaller cache size than the other wt_cache_full.js tests because it
- // has to work with the hard-coded 300 MB refetch limit in the pre-4.0 rollback
- // implementation.
- wiredTigerCacheSizeGB: 0.5,
- };
- const rst = new ReplSetTest({
- nodes: 3,
- nodeOptions: nodeOptions,
- useBridge: true,
- });
+// Use constrained cache size for both data bearing nodes so that it doesn't matter which node
+// RollbackTest selects as the rollback node.
+const nodeOptions = {
+ // Don't log slow operations.
+ slowms: 30000,
+ // Constrain the storage engine cache size to make it easier to fill it up with unflushed
+ // modifications.
+ // This test uses a smaller cache size than the other wt_cache_full.js tests because it
+ // has to work with the hard-coded 300 MB refetch limit in the pre-4.0 rollback
+ // implementation.
+ wiredTigerCacheSizeGB: 0.5,
+};
+const rst = new ReplSetTest({
+ nodes: 3,
+ nodeOptions: nodeOptions,
+ useBridge: true,
+});
- rst.startSet();
- let config = rst.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- rst.initiate(config);
+rst.startSet();
+let config = rst.getReplSetConfig();
+config.members[2].priority = 0;
+config.settings = {
+ chainingAllowed: false
+};
+rst.initiate(config);
- // Prior to 4.0, rollback imposed a 300 MB limit on the total size of documents to refetch from
- // the sync source. Therefore, we select values for numDocs and minDocSizeMB, while accounting
- // for some small per-document overhead, such that we are able to stay under this 300 MB limit.
- // This test uses single updates, rather than the multiple updates in the other wt_cache_full.js
- // tests because the refetching logic in the pre-4.0 algorithm depends on which documents were
- // modified, not on the number of modifications to each document.
- // This test has been observed to hang under some non-standard build platforms so we are
- // giving ourselves a slightly larger allowance of 5 documents from the theoretical maximum
- // of documents calculated from the rollback size limit.
- // Using a numDocs value of (maxDocs - 5) is sufficiently large enough to reproduce the memory
- // pressure issue in 3.6.5 but small enough for this test to perform uniformly across most of
- // the platforms in our continuous integration system.
- const rollbackSizeLimitMB = 300;
- const minDocSizeMB = 10;
- const largeString = 'x'.repeat(minDocSizeMB * 1024 * 1024);
- // TODO(SERVER-39774): Increase numDocs to Math.floor(rollbackSizeLimitMB / minDocSizeMB).
- const numDocs = 1;
+// Prior to 4.0, rollback imposed a 300 MB limit on the total size of documents to refetch from
+// the sync source. Therefore, we select values for numDocs and minDocSizeMB, while accounting
+// for some small per-document overhead, such that we are able to stay under this 300 MB limit.
+// This test uses single updates, rather than the multiple updates in the other wt_cache_full.js
+// tests because the refetching logic in the pre-4.0 algorithm depends on which documents were
+// modified, not on the number of modifications to each document.
+// This test has been observed to hang under some non-standard build platforms so we are
+// giving ourselves a slightly larger allowance of 5 documents from the theoretical maximum
+// of documents calculated from the rollback size limit.
+// Using a numDocs value of (maxDocs - 5) is sufficiently large to reproduce the memory
+// pressure issue in 3.6.5 but small enough for this test to perform uniformly across most of
+// the platforms in our continuous integration system.
+const rollbackSizeLimitMB = 300;
+const minDocSizeMB = 10;
+const largeString = 'x'.repeat(minDocSizeMB * 1024 * 1024);
+// TODO(SERVER-39774): Increase numDocs to Math.floor(rollbackSizeLimitMB / minDocSizeMB).
+const numDocs = 1;
- // Operations that will be present on both nodes, before the common point.
- const collName = 'test.t';
- let CommonOps = (node) => {
- const coll = node.getCollection(collName);
- jsTestLog('Inserting ' + numDocs + ' documents of ' + minDocSizeMB + ' MB each into ' +
- collName + '.');
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(coll.save(
- {_id: i, a: 0, x: largeString},
- {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
- };
+// Operations that will be present on both nodes, before the common point.
+const collName = 'test.t';
+let CommonOps = (node) => {
+ const coll = node.getCollection(collName);
+ jsTestLog('Inserting ' + numDocs + ' documents of ' + minDocSizeMB + ' MB each into ' +
+ collName + '.');
+ for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, a: 0, x: largeString},
+ {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+ }
+ assert.eq(numDocs, coll.find().itcount());
+};
- // Operations that will be performed on the rollback node past the common point.
- let RollbackOps = (node) => {
- const coll = node.getCollection(collName);
- jsTestLog('Updating ' + numDocs +
- ' documents on the primary. These updates will be rolled back.');
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(coll.update({_id: i}, {$inc: {a: 1}}));
- }
- };
+// Operations that will be performed on the rollback node past the common point.
+let RollbackOps = (node) => {
+ const coll = node.getCollection(collName);
+ jsTestLog('Updating ' + numDocs +
+ ' documents on the primary. These updates will be rolled back.');
+ for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(coll.update({_id: i}, {$inc: {a: 1}}));
+ }
+};
- // Set up Rollback Test.
- const rollbackTest = new RollbackTest(rst.name, rst);
- CommonOps(rollbackTest.getPrimary());
+// Set up Rollback Test.
+const rollbackTest = new RollbackTest(rst.name, rst);
+CommonOps(rollbackTest.getPrimary());
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- rollbackTest.stop();
+rollbackTest.stop();
})();
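
The comment block above pins numDocs to a value derived from the pre-4.0 refetch limit; the arithmetic it describes, written out with the test's own constants:

// Worked version of the sizing described above.
const rollbackSizeLimitMB = 300;                                 // pre-4.0 refetch limit
const minDocSizeMB = 10;                                         // payload per document
const maxDocs = Math.floor(rollbackSizeLimitMB / minDocSizeMB);  // 30 documents fit under the limit
const intendedNumDocs = maxDocs - 5;                             // 25, the headroom the comment describes
// The test currently uses numDocs = 1 pending SERVER-39774.
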
diff --git a/jstests/noPassthrough/rollback_wt_drop.js b/jstests/noPassthrough/rollback_wt_drop.js
index fcf9c2522d9..8c235695439 100644
--- a/jstests/noPassthrough/rollback_wt_drop.js
+++ b/jstests/noPassthrough/rollback_wt_drop.js
@@ -3,150 +3,148 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
-
- load('jstests/replsets/libs/rollback_test.js');
-
- // Returns list of collections in database, including pending drops.
- // Assumes all collections fit in first batch of results.
- function listCollections(database) {
- return assert
- .commandWorked(database.runCommand({listCollections: 1, includePendingDrops: true}))
- .cursor.firstBatch;
- }
-
- // Operations that will be present on both nodes, before the common point.
- const collName = 'test.t';
- const renameTargetCollName = 'test.x';
- const noOpsToRollbackCollName = 'test.k';
- let CommonOps = (node) => {
- const coll = node.getCollection(collName);
- const mydb = coll.getDB();
- assert.commandWorked(mydb.createCollection(coll.getName()));
- assert.commandWorked(coll.createIndex({a: 1}));
- assert.commandWorked(coll.insert({_id: 0, a: 0}));
-
- // Replicate a drop.
- const replicatedDropCollName = 'w';
- const collToDrop = mydb.getCollection(replicatedDropCollName);
- assert.commandWorked(mydb.createCollection(collToDrop.getName()));
- assert(collToDrop.drop());
-
- // This collection will be dropped during a rename.
- const renameTargetColl = node.getCollection(renameTargetCollName);
- assert.commandWorked(mydb.createCollection(renameTargetColl.getName()));
- assert.commandWorked(renameTargetColl.createIndex({b: 1}));
- assert.commandWorked(renameTargetColl.insert({_id: 8, b: 8}));
- assert.commandWorked(renameTargetColl.insert({_id: 9, b: 9}));
-
- // This collection will be dropped without any CRUD ops to rollback.
- const noOpsToRollbackColl = node.getCollection(noOpsToRollbackCollName);
- assert.commandWorked(mydb.createCollection(noOpsToRollbackColl.getName()));
- assert.commandWorked(noOpsToRollbackColl.createIndex({c: 1}));
- assert.commandWorked(noOpsToRollbackColl.insert({_id: 20, c: 20}));
- assert.commandWorked(noOpsToRollbackColl.insert({_id: 21, c: 21}));
- };
-
- // Operations that will be performed on the rollback node past the common point.
- let RollbackOps = (node) => {
- const coll = node.getCollection(collName);
-
- // Rollback algorithm may refer to dropped collection if it has to undo an insert.
- assert.commandWorked(coll.insert({_id: 1, a: 1}));
-
- const mydb = coll.getDB();
- const collectionsBeforeDrop = listCollections(mydb);
- assert(coll.drop());
- const collectionsAfterDrop = listCollections(mydb);
- const supportsPendingDrops = mydb.serverStatus().storageEngine.supportsPendingDrops;
- jsTestLog('supportsPendingDrops = ' + supportsPendingDrops);
- if (!supportsPendingDrops) {
- assert.eq(collectionsAfterDrop.length,
- collectionsBeforeDrop.length,
- 'listCollections did not report the same number of collections in database ' +
- mydb.getName() + ' after dropping collection ' + coll.getFullName() +
- '. Before: ' + tojson(collectionsBeforeDrop) + '. After: ' +
- tojson(collectionsAfterDrop));
- } else {
- assert.lt(collectionsAfterDrop.length,
- collectionsBeforeDrop.length,
- 'listCollections did not report fewer collections in database ' +
- mydb.getName() + ' after dropping collection ' + coll.getFullName() +
- '. Before: ' + tojson(collectionsBeforeDrop) + '. After: ' +
- tojson(collectionsAfterDrop));
- assert.gt(mydb.serverStatus().storageEngine.dropPendingIdents,
- 0,
- 'There is no drop pending ident in the storage engine.');
- }
-
- const renameTargetColl = node.getCollection(renameTargetCollName);
- assert.commandWorked(renameTargetColl.insert({_id: 10, b: 10}));
- assert.commandWorked(renameTargetColl.insert({_id: 11, b: 11}));
- const renameSourceColl = mydb.getCollection('z');
- assert.commandWorked(mydb.createCollection(renameSourceColl.getName()));
- assert.commandWorked(renameSourceColl.renameCollection(renameTargetColl.getName(), true));
-
- const noOpsToRollbackColl = node.getCollection(noOpsToRollbackCollName);
- assert(noOpsToRollbackColl.drop());
-
- // This collection will not exist after rollback.
- const tempColl = node.getCollection('test.a');
- assert.commandWorked(mydb.createCollection(tempColl.getName()));
- assert.commandWorked(tempColl.insert({_id: 100, y: 100}));
- assert(tempColl.drop());
-
- // restartCatalog should not remove drop-pending idents.
- assert.commandWorked(mydb.adminCommand({restartCatalog: 1}));
- };
-
- // Set up Rollback Test.
- const rollbackTest = new RollbackTest();
- CommonOps(rollbackTest.getPrimary());
-
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
-
- {
- // Check collection drop oplog entry.
- const replTest = rollbackTest.getTestFixture();
- const ops = replTest.dumpOplog(rollbackNode, {ns: 'test.$cmd', 'o.drop': 't'});
- assert.eq(1, ops.length);
- const op = ops[0];
- assert(op.hasOwnProperty('o2'), 'expected o2 field in drop oplog entry: ' + tojson(op));
- assert(op.o2.hasOwnProperty('numRecords'),
- 'expected count in drop oplog entry: ' + tojson(op));
- assert.eq(2, op.o2.numRecords, 'incorrect count in drop oplog entry: ' + tojson(op));
- }
-
- // Check collection rename oplog entry.
- {
- const replTest = rollbackTest.getTestFixture();
- const ops = replTest.dumpOplog(
- rollbackNode, {ns: 'test.$cmd', 'o.renameCollection': 'test.z', 'o.to': 'test.x'});
- assert.eq(1, ops.length);
- const op = ops[0];
- assert(op.hasOwnProperty('o2'), 'expected o2 field in rename oplog entry: ' + tojson(op));
- assert(op.o2.hasOwnProperty('numRecords'),
- 'expected count in rename oplog entry: ' + tojson(op));
- assert.eq(4, op.o2.numRecords, 'incorrect count in rename oplog entry: ' + tojson(op));
+'use strict';
+
+load('jstests/replsets/libs/rollback_test.js');
+
+// Returns list of collections in database, including pending drops.
+// Assumes all collections fit in first batch of results.
+function listCollections(database) {
+ return assert
+ .commandWorked(database.runCommand({listCollections: 1, includePendingDrops: true}))
+ .cursor.firstBatch;
+}
+
+// Operations that will be present on both nodes, before the common point.
+const collName = 'test.t';
+const renameTargetCollName = 'test.x';
+const noOpsToRollbackCollName = 'test.k';
+let CommonOps = (node) => {
+ const coll = node.getCollection(collName);
+ const mydb = coll.getDB();
+ assert.commandWorked(mydb.createCollection(coll.getName()));
+ assert.commandWorked(coll.createIndex({a: 1}));
+ assert.commandWorked(coll.insert({_id: 0, a: 0}));
+
+ // Replicate a drop.
+ const replicatedDropCollName = 'w';
+ const collToDrop = mydb.getCollection(replicatedDropCollName);
+ assert.commandWorked(mydb.createCollection(collToDrop.getName()));
+ assert(collToDrop.drop());
+
+ // This collection will be dropped during a rename.
+ const renameTargetColl = node.getCollection(renameTargetCollName);
+ assert.commandWorked(mydb.createCollection(renameTargetColl.getName()));
+ assert.commandWorked(renameTargetColl.createIndex({b: 1}));
+ assert.commandWorked(renameTargetColl.insert({_id: 8, b: 8}));
+ assert.commandWorked(renameTargetColl.insert({_id: 9, b: 9}));
+
+ // This collection will be dropped without any CRUD ops to rollback.
+ const noOpsToRollbackColl = node.getCollection(noOpsToRollbackCollName);
+ assert.commandWorked(mydb.createCollection(noOpsToRollbackColl.getName()));
+ assert.commandWorked(noOpsToRollbackColl.createIndex({c: 1}));
+ assert.commandWorked(noOpsToRollbackColl.insert({_id: 20, c: 20}));
+ assert.commandWorked(noOpsToRollbackColl.insert({_id: 21, c: 21}));
+};
+
+// Operations that will be performed on the rollback node past the common point.
+let RollbackOps = (node) => {
+ const coll = node.getCollection(collName);
+
+ // Rollback algorithm may refer to dropped collection if it has to undo an insert.
+ assert.commandWorked(coll.insert({_id: 1, a: 1}));
+
+ const mydb = coll.getDB();
+ const collectionsBeforeDrop = listCollections(mydb);
+ assert(coll.drop());
+ const collectionsAfterDrop = listCollections(mydb);
+ const supportsPendingDrops = mydb.serverStatus().storageEngine.supportsPendingDrops;
+ jsTestLog('supportsPendingDrops = ' + supportsPendingDrops);
+ if (!supportsPendingDrops) {
+ assert.eq(collectionsAfterDrop.length,
+ collectionsBeforeDrop.length,
+ 'listCollections did not report the same number of collections in database ' +
+ mydb.getName() + ' after dropping collection ' + coll.getFullName() +
+ '. Before: ' + tojson(collectionsBeforeDrop) +
+ '. After: ' + tojson(collectionsAfterDrop));
+ } else {
+ assert.lt(collectionsAfterDrop.length,
+ collectionsBeforeDrop.length,
+ 'listCollections did not report fewer collections in database ' + mydb.getName() +
+ ' after dropping collection ' + coll.getFullName() + '. Before: ' +
+ tojson(collectionsBeforeDrop) + '. After: ' + tojson(collectionsAfterDrop));
+ assert.gt(mydb.serverStatus().storageEngine.dropPendingIdents,
+ 0,
+ 'There is no drop pending ident in the storage engine.');
}
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Check collection count.
- const primary = rollbackTest.getPrimary();
- const coll = primary.getCollection(collName);
- assert.eq(1, coll.find().itcount());
- assert.eq(1, coll.count());
- const renameTargetColl = primary.getCollection(renameTargetCollName);
- assert.eq(2, renameTargetColl.find().itcount());
- assert.eq(2, renameTargetColl.count());
- const noOpsToRollbackColl = primary.getCollection(noOpsToRollbackCollName);
- assert.eq(2, noOpsToRollbackColl.find().itcount());
- assert.eq(2, noOpsToRollbackColl.count());
-
- rollbackTest.stop();
+ const renameTargetColl = node.getCollection(renameTargetCollName);
+ assert.commandWorked(renameTargetColl.insert({_id: 10, b: 10}));
+ assert.commandWorked(renameTargetColl.insert({_id: 11, b: 11}));
+ const renameSourceColl = mydb.getCollection('z');
+ assert.commandWorked(mydb.createCollection(renameSourceColl.getName()));
+ assert.commandWorked(renameSourceColl.renameCollection(renameTargetColl.getName(), true));
+
+ const noOpsToRollbackColl = node.getCollection(noOpsToRollbackCollName);
+ assert(noOpsToRollbackColl.drop());
+
+ // This collection will not exist after rollback.
+ const tempColl = node.getCollection('test.a');
+ assert.commandWorked(mydb.createCollection(tempColl.getName()));
+ assert.commandWorked(tempColl.insert({_id: 100, y: 100}));
+ assert(tempColl.drop());
+
+ // restartCatalog should not remove drop-pending idents.
+ assert.commandWorked(mydb.adminCommand({restartCatalog: 1}));
+};
+
+// Set up Rollback Test.
+const rollbackTest = new RollbackTest();
+CommonOps(rollbackTest.getPrimary());
+
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
+
+{
+ // Check collection drop oplog entry.
+ const replTest = rollbackTest.getTestFixture();
+ const ops = replTest.dumpOplog(rollbackNode, {ns: 'test.$cmd', 'o.drop': 't'});
+ assert.eq(1, ops.length);
+ const op = ops[0];
+ assert(op.hasOwnProperty('o2'), 'expected o2 field in drop oplog entry: ' + tojson(op));
+ assert(op.o2.hasOwnProperty('numRecords'), 'expected count in drop oplog entry: ' + tojson(op));
+ assert.eq(2, op.o2.numRecords, 'incorrect count in drop oplog entry: ' + tojson(op));
+}
+
+// Check collection rename oplog entry.
+{
+ const replTest = rollbackTest.getTestFixture();
+ const ops = replTest.dumpOplog(
+ rollbackNode, {ns: 'test.$cmd', 'o.renameCollection': 'test.z', 'o.to': 'test.x'});
+ assert.eq(1, ops.length);
+ const op = ops[0];
+ assert(op.hasOwnProperty('o2'), 'expected o2 field in rename oplog entry: ' + tojson(op));
+ assert(op.o2.hasOwnProperty('numRecords'),
+ 'expected count in rename oplog entry: ' + tojson(op));
+ assert.eq(4, op.o2.numRecords, 'incorrect count in rename oplog entry: ' + tojson(op));
+}
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+// Check collection count.
+const primary = rollbackTest.getPrimary();
+const coll = primary.getCollection(collName);
+assert.eq(1, coll.find().itcount());
+assert.eq(1, coll.count());
+const renameTargetColl = primary.getCollection(renameTargetCollName);
+assert.eq(2, renameTargetColl.find().itcount());
+assert.eq(2, renameTargetColl.count());
+const noOpsToRollbackColl = primary.getCollection(noOpsToRollbackCollName);
+assert.eq(2, noOpsToRollbackColl.find().itcount());
+assert.eq(2, noOpsToRollbackColl.count());
+
+rollbackTest.stop();
})();
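
Two details the test relies on can also be inspected by hand: listCollections with includePendingDrops surfaces drop-pending namespaces, and the drop oplog entry records the dropped document count in o2.numRecords. A hedged sketch, with database and collection names mirroring the test:

// Sketch only; run against a replica-set member that uses two-phase drops.
const mydb = db.getSiblingDB("test");
const withPending =
    assert.commandWorked(mydb.runCommand({listCollections: 1, includePendingDrops: true}))
        .cursor.firstBatch;
printjson(withPending.map(c => c.name));  // drop-pending namespaces appear here until the drop commits
const dropOps =
    db.getSiblingDB("local").oplog.rs.find({ns: "test.$cmd", "o.drop": "t"}).toArray();
dropOps.forEach(op => print("dropped 't' containing " + op.o2.numRecords + " records"));
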
diff --git a/jstests/noPassthrough/router_transactions_metrics.js b/jstests/noPassthrough/router_transactions_metrics.js
index 9fd6c5f83eb..778f2adebea 100644
--- a/jstests/noPassthrough/router_transactions_metrics.js
+++ b/jstests/noPassthrough/router_transactions_metrics.js
@@ -2,576 +2,575 @@
// basic cases.
// @tags: [uses_transactions, uses_multi_shard_transaction]
(function() {
- "use strict";
-
- load("jstests/sharding/libs/sharded_transactions_helpers.js");
-
- // Verifies the transaction server status response has the fields that we expect.
- function verifyServerStatusFields(res) {
- const expectedFields = [
- "totalStarted",
- "totalAborted",
- "abortCause",
- "totalCommitted",
- "totalContactedParticipants",
- "totalParticipantsAtCommit",
- "totalRequestsTargeted",
- "commitTypes",
- ];
-
- assert(
- res.hasOwnProperty("transactions"),
- "Expected serverStatus response to have a 'transactions' field, res: " + tojson(res));
-
- assert.hasFields(res.transactions,
- expectedFields,
- "The 'transactions' field did not have all of the expected fields, res: " +
- tojson(res.transactions));
-
- assert.eq(expectedFields.length,
- Object.keys(res.transactions).length,
- "the 'transactions' field had an unexpected number of fields, res: " +
- tojson(res.transactions));
-
- // Verify the "commitTypes" sub-object has the expected fields.
- const commitTypes = [
- "noShards",
- "singleShard",
- "singleWriteShard",
- "readOnly",
- "twoPhaseCommit",
- "recoverWithToken",
- ];
- const commitTypeFields = ["initiated", "successful", "successfulDurationMicros"];
-
- assert.hasFields(res.transactions.commitTypes,
- commitTypes,
- "The 'transactions' field did not have each expected commit type, res: " +
- tojson(res.transactions));
-
- assert.eq(commitTypes.length,
- Object.keys(res.transactions.commitTypes).length,
- "the 'transactions' field had an unexpected number of commit types, res: " +
- tojson(res.transactions));
-
- commitTypes.forEach((type) => {
- assert.hasFields(res.transactions.commitTypes[type],
- commitTypeFields,
- "commit type " + type +
- " did not have all the expected fields, commit types: " +
- tojson(res.transactions.commitTypes));
-
- assert.eq(commitTypeFields.length,
- Object.keys(res.transactions.commitTypes[type]).length,
- "commit type " + type +
- " had an unexpected number of fields, commit types: " +
- tojson(res.transactions.commitTypes));
- });
+"use strict";
+
+load("jstests/sharding/libs/sharded_transactions_helpers.js");
+
+// Verifies the transaction server status response has the fields that we expect.
+function verifyServerStatusFields(res) {
+ const expectedFields = [
+ "totalStarted",
+ "totalAborted",
+ "abortCause",
+ "totalCommitted",
+ "totalContactedParticipants",
+ "totalParticipantsAtCommit",
+ "totalRequestsTargeted",
+ "commitTypes",
+ ];
+
+ assert(res.hasOwnProperty("transactions"),
+ "Expected serverStatus response to have a 'transactions' field, res: " + tojson(res));
+
+ assert.hasFields(res.transactions,
+ expectedFields,
+ "The 'transactions' field did not have all of the expected fields, res: " +
+ tojson(res.transactions));
+
+ assert.eq(expectedFields.length,
+ Object.keys(res.transactions).length,
+ "the 'transactions' field had an unexpected number of fields, res: " +
+ tojson(res.transactions));
+
+ // Verify the "commitTypes" sub-object has the expected fields.
+ const commitTypes = [
+ "noShards",
+ "singleShard",
+ "singleWriteShard",
+ "readOnly",
+ "twoPhaseCommit",
+ "recoverWithToken",
+ ];
+ const commitTypeFields = ["initiated", "successful", "successfulDurationMicros"];
+
+ assert.hasFields(res.transactions.commitTypes,
+ commitTypes,
+ "The 'transactions' field did not have each expected commit type, res: " +
+ tojson(res.transactions));
+
+ assert.eq(commitTypes.length,
+ Object.keys(res.transactions.commitTypes).length,
+ "the 'transactions' field had an unexpected number of commit types, res: " +
+ tojson(res.transactions));
+
+ commitTypes.forEach((type) => {
+ assert.hasFields(res.transactions.commitTypes[type],
+ commitTypeFields,
+ "commit type " + type +
+ " did not have all the expected fields, commit types: " +
+ tojson(res.transactions.commitTypes));
+
+ assert.eq(commitTypeFields.length,
+ Object.keys(res.transactions.commitTypes[type]).length,
+ "commit type " + type + " had an unexpected number of fields, commit types: " +
+ tojson(res.transactions.commitTypes));
+ });
+}
+
+class ExpectedCommitType {
+ constructor() {
+ this.initiated = 0;
+ this.successful = 0;
+ this.successfulDurationMicros = 0;
}
+}
- class ExpectedCommitType {
- constructor() {
- this.initiated = 0;
- this.successful = 0;
- this.successfulDurationMicros = 0;
- }
+class ExpectedAbortCause {
+ constructor() {
}
-
- class ExpectedAbortCause {
- constructor() {
- }
+}
+
+class ExpectedTransactionServerStatus {
+ constructor() {
+ this.totalStarted = 0;
+ this.totalAborted = 0;
+ this.abortCause = new ExpectedAbortCause();
+ this.totalCommitted = 0;
+ this.totalContactedParticipants = 0;
+ this.totalParticipantsAtCommit = 0;
+ this.totalRequestsTargeted = 0;
+ this.commitTypes = {
+ noShards: new ExpectedCommitType(),
+ singleShard: new ExpectedCommitType(),
+ singleWriteShard: new ExpectedCommitType(),
+ readOnly: new ExpectedCommitType(),
+ twoPhaseCommit: new ExpectedCommitType(),
+ recoverWithToken: new ExpectedCommitType(),
+ };
}
-
- class ExpectedTransactionServerStatus {
- constructor() {
- this.totalStarted = 0;
- this.totalAborted = 0;
- this.abortCause = new ExpectedAbortCause();
- this.totalCommitted = 0;
- this.totalContactedParticipants = 0;
- this.totalParticipantsAtCommit = 0;
- this.totalRequestsTargeted = 0;
- this.commitTypes = {
- noShards: new ExpectedCommitType(),
- singleShard: new ExpectedCommitType(),
- singleWriteShard: new ExpectedCommitType(),
- readOnly: new ExpectedCommitType(),
- twoPhaseCommit: new ExpectedCommitType(),
- recoverWithToken: new ExpectedCommitType(),
- };
+}
+
+// Verifies the transaction values in the server status response match the provided values.
+function verifyServerStatusValues(st, expectedStats) {
+ const res = assert.commandWorked(st.s.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(res);
+
+ const stats = res.transactions;
+ assert.eq(expectedStats.totalStarted,
+ stats.totalStarted,
+ "unexpected totalStarted, res: " + tojson(stats));
+ assert.eq(expectedStats.totalAborted,
+ stats.totalAborted,
+ "unexpected totalAborted, res: " + tojson(stats));
+ assert.eq(expectedStats.totalCommitted,
+ stats.totalCommitted,
+ "unexpected totalCommitted, res: " + tojson(stats));
+ assert.eq(expectedStats.totalContactedParticipants,
+ stats.totalContactedParticipants,
+ "unexpected totalContactedParticipants, res: " + tojson(stats));
+ assert.eq(expectedStats.totalParticipantsAtCommit,
+ stats.totalParticipantsAtCommit,
+ "unexpected totalParticipantsAtCommit, res: " + tojson(stats));
+ assert.eq(expectedStats.totalRequestsTargeted,
+ stats.totalRequestsTargeted,
+ "unexpected totalRequestsTargeted, res: " + tojson(stats));
+
+ const commitTypes = res.transactions.commitTypes;
+ Object.keys(commitTypes).forEach((commitType) => {
+ assert.eq(
+ expectedStats.commitTypes[commitType].initiated,
+ commitTypes[commitType].initiated,
+ "unexpected initiated for " + commitType + ", commit types: " + tojson(commitTypes));
+ assert.eq(
+ expectedStats.commitTypes[commitType].successful,
+ commitTypes[commitType].successful,
+ "unexpected successful for " + commitType + ", commit types: " + tojson(commitTypes));
+
+ assert.lte(expectedStats.commitTypes[commitType].successfulDurationMicros,
+ commitTypes[commitType].successfulDurationMicros,
+ "unexpected successfulDurationMicros for " + commitType +
+ ", commit types: " + tojson(commitTypes));
+ expectedStats.commitTypes[commitType].successfulDurationMicros =
+ commitTypes[commitType].successfulDurationMicros;
+
+ if (commitTypes[commitType].successful != 0) {
+ assert.gt(commitTypes[commitType].successfulDurationMicros,
+ 0,
+ "unexpected successfulDurationMicros for " + commitType +
+ ", commit types: " + tojson(commitTypes));
}
- }
-
- // Verifies the transaction values in the server status response match the provided values.
- function verifyServerStatusValues(st, expectedStats) {
- const res = assert.commandWorked(st.s.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(res);
-
- const stats = res.transactions;
- assert.eq(expectedStats.totalStarted,
- stats.totalStarted,
- "unexpected totalStarted, res: " + tojson(stats));
- assert.eq(expectedStats.totalAborted,
- stats.totalAborted,
- "unexpected totalAborted, res: " + tojson(stats));
- assert.eq(expectedStats.totalCommitted,
- stats.totalCommitted,
- "unexpected totalCommitted, res: " + tojson(stats));
- assert.eq(expectedStats.totalContactedParticipants,
- stats.totalContactedParticipants,
- "unexpected totalContactedParticipants, res: " + tojson(stats));
- assert.eq(expectedStats.totalParticipantsAtCommit,
- stats.totalParticipantsAtCommit,
- "unexpected totalParticipantsAtCommit, res: " + tojson(stats));
- assert.eq(expectedStats.totalRequestsTargeted,
- stats.totalRequestsTargeted,
- "unexpected totalRequestsTargeted, res: " + tojson(stats));
-
- const commitTypes = res.transactions.commitTypes;
- Object.keys(commitTypes).forEach((commitType) => {
- assert.eq(expectedStats.commitTypes[commitType].initiated,
- commitTypes[commitType].initiated,
- "unexpected initiated for " + commitType + ", commit types: " +
- tojson(commitTypes));
- assert.eq(expectedStats.commitTypes[commitType].successful,
- commitTypes[commitType].successful,
- "unexpected successful for " + commitType + ", commit types: " +
- tojson(commitTypes));
-
- assert.lte(expectedStats.commitTypes[commitType].successfulDurationMicros,
- commitTypes[commitType].successfulDurationMicros,
- "unexpected successfulDurationMicros for " + commitType +
- ", commit types: " + tojson(commitTypes));
- expectedStats.commitTypes[commitType].successfulDurationMicros =
- commitTypes[commitType].successfulDurationMicros;
-
- if (commitTypes[commitType].successful != 0) {
- assert.gt(commitTypes[commitType].successfulDurationMicros,
- 0,
- "unexpected successfulDurationMicros for " + commitType +
- ", commit types: " + tojson(commitTypes));
- }
- });
-
- const abortCause = res.transactions.abortCause;
- Object.keys(abortCause).forEach((cause) => {
- assert.eq(expectedStats.abortCause[cause],
- abortCause[cause],
- "unexpected abortCause for " + cause + ", res: " + tojson(stats));
- });
-
- assert.eq(Object.keys(abortCause).length,
- Object.keys(expectedStats.abortCause).length,
- "the 'transactions' field had an unexpected number of abort causes, res: " +
- tojson(stats));
- }
-
- function abortFromUnderneath(st, session) {
- st._rs.forEach((rs) => {
- assert.commandWorkedOrFailedWithCode(rs.test.getPrimary().adminCommand({
- abortTransaction: 1,
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
- });
- }
-
- const dbName = "test";
- const collName = "foo";
- const ns = dbName + '.' + collName;
-
- const st = new ShardingTest({shards: 2, mongos: 2, config: 1});
-
- const session = st.s.startSession();
- const sessionDB = session.getDatabase(dbName);
+ });
+
+ const abortCause = res.transactions.abortCause;
+ Object.keys(abortCause).forEach((cause) => {
+ assert.eq(expectedStats.abortCause[cause],
+ abortCause[cause],
+ "unexpected abortCause for " + cause + ", res: " + tojson(stats));
+ });
+
+ assert.eq(
+ Object.keys(abortCause).length,
+ Object.keys(expectedStats.abortCause).length,
+ "the 'transactions' field had an unexpected number of abort causes, res: " + tojson(stats));
+}
+
+function abortFromUnderneath(st, session) {
+ st._rs.forEach((rs) => {
+ assert.commandWorkedOrFailedWithCode(rs.test.getPrimary().adminCommand({
+ abortTransaction: 1,
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+ autocommit: false
+ }),
+ ErrorCodes.NoSuchTransaction);
+ });
+}
- const otherRouterSession = st.s1.startSession();
- const otherRouterSessionDB = otherRouterSession.getDatabase(dbName);
+const dbName = "test";
+const collName = "foo";
+const ns = dbName + '.' + collName;
- // Set up two chunks: [-inf, 0), [0, inf) one on each shard, with one document in each.
+const st = new ShardingTest({shards: 2, mongos: 2, config: 1});
- assert.commandWorked(sessionDB[collName].insert({_id: -1}));
- assert.commandWorked(sessionDB[collName].insert({_id: 1}));
+const session = st.s.startSession();
+const sessionDB = session.getDatabase(dbName);
- assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
- st.ensurePrimaryShard(dbName, st.shard0.shardName);
- assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
- assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
- assert.commandWorked(
- st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
- flushRoutersAndRefreshShardMetadata(st, {ns});
+const otherRouterSession = st.s1.startSession();
+const otherRouterSessionDB = otherRouterSession.getDatabase(dbName);
- let expectedStats = new ExpectedTransactionServerStatus();
+// Set up two chunks: [-inf, 0), [0, inf) one on each shard, with one document in each.
- //
- // Helpers for setting up transactions that will trigger the various commit paths.
- //
+assert.commandWorked(sessionDB[collName].insert({_id: -1}));
+assert.commandWorked(sessionDB[collName].insert({_id: 1}));
- function startNoShardsTransaction() {
- session.startTransaction();
- assert.commandWorked(session.getDatabase("doesntExist").runCommand({find: collName}));
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+st.ensurePrimaryShard(dbName, st.shard0.shardName);
+assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
+assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: 0}}));
+assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {_id: 1}, to: st.shard1.shardName}));
+flushRoutersAndRefreshShardMetadata(st, {ns});
- expectedStats.totalStarted += 1;
- verifyServerStatusValues(st, expectedStats);
- }
+let expectedStats = new ExpectedTransactionServerStatus();
- function startSingleShardTransaction() {
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({x: 1}));
+//
+// Helpers for setting up transactions that will trigger the various commit paths.
+//
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- }
+function startNoShardsTransaction() {
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("doesntExist").runCommand({find: collName}));
- function startSingleWriteShardTransaction() {
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({x: 1}));
+ expectedStats.totalStarted += 1;
+ verifyServerStatusValues(st, expectedStats);
+}
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
+function startSingleShardTransaction() {
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({x: 1}));
- assert.commandWorked(sessionDB.runCommand({find: collName}));
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+}
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 2;
- verifyServerStatusValues(st, expectedStats);
- }
+function startSingleWriteShardTransaction() {
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({x: 1}));
- function startReadOnlyTransaction() {
- session.startTransaction();
- assert.commandWorked(sessionDB.runCommand({find: collName}));
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 2;
- expectedStats.totalRequestsTargeted += 2;
- verifyServerStatusValues(st, expectedStats);
- }
+ assert.commandWorked(sessionDB.runCommand({find: collName}));
- function startTwoPhaseCommitTransaction() {
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({_id: -5}));
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 2;
+ verifyServerStatusValues(st, expectedStats);
+}
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
+function startReadOnlyTransaction() {
+ session.startTransaction();
+ assert.commandWorked(sessionDB.runCommand({find: collName}));
- assert.commandWorked(sessionDB[collName].insert({_id: 5}));
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 2;
+ expectedStats.totalRequestsTargeted += 2;
+ verifyServerStatusValues(st, expectedStats);
+}
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- }
+function startTwoPhaseCommitTransaction() {
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({_id: -5}));
- function setUpTransactionToRecoverCommit({shouldCommit}) {
- otherRouterSession.startTransaction();
- let resWithRecoveryToken = assert.commandWorked(
- otherRouterSessionDB.runCommand({insert: collName, documents: [{x: 5}]}));
- if (shouldCommit) {
- assert.commandWorked(otherRouterSession.commitTransaction_forTesting());
- } else {
- assert.commandWorked(otherRouterSession.abortTransaction_forTesting());
- }
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
- // The stats on the main mongos shouldn't have changed.
- verifyServerStatusValues(st, expectedStats);
+ assert.commandWorked(sessionDB[collName].insert({_id: 5}));
- return resWithRecoveryToken.recoveryToken;
- }
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+}
- //
- // Test cases for serverStatus output.
- //
-
- jsTest.log("Default values.");
- (() => {
- verifyServerStatusValues(st, expectedStats);
- })();
-
-    // Note that committing a transaction which targeted no shards can only succeed.
- jsTest.log("Committed no shards transaction.");
- (() => {
- startNoShardsTransaction();
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.noShards.initiated += 1;
- expectedStats.commitTypes.noShards.successful += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Successful single shard transaction.");
- (() => {
- startSingleShardTransaction();
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.singleShard.initiated += 1;
- expectedStats.commitTypes.singleShard.successful += 1;
- expectedStats.totalParticipantsAtCommit += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Failed single shard transaction.");
- (() => {
- startSingleShardTransaction();
-
- abortFromUnderneath(st, session);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["NoSuchTransaction"] = 1;
- expectedStats.commitTypes.singleShard.initiated += 1;
- expectedStats.totalParticipantsAtCommit += 1;
-        // The one shard is targeted for the commit, then for the implicit abort.
- expectedStats.totalRequestsTargeted += 1 + 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Successful single write shard transaction.");
- (() => {
- startSingleWriteShardTransaction();
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.singleWriteShard.initiated += 1;
- expectedStats.commitTypes.singleWriteShard.successful += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- expectedStats.totalRequestsTargeted += 2;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Failed single write shard transaction.");
- (() => {
- startSingleWriteShardTransaction();
-
- abortFromUnderneath(st, session);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["NoSuchTransaction"] += 1;
- expectedStats.commitTypes.singleWriteShard.initiated += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- // In a single write shard commit, all read shards are committed first, then the
- // write shards, so if committing on a read shard fails, the write shards aren't targeted.
-        // The implicit abort that follows will target all shards.
- expectedStats.totalRequestsTargeted += 1 + 2;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Successful read only transaction.");
- (() => {
- startReadOnlyTransaction();
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.readOnly.initiated += 1;
- expectedStats.commitTypes.readOnly.successful += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- expectedStats.totalRequestsTargeted += 2;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Failed read only transaction.");
- (() => {
- startReadOnlyTransaction();
-
- abortFromUnderneath(st, session);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["NoSuchTransaction"] += 1;
- expectedStats.commitTypes.readOnly.initiated += 1;
- expectedStats.totalParticipantsAtCommit += 2;
-        // Both shards are targeted for the commit, then for the implicit abort.
- expectedStats.totalRequestsTargeted += 2 + 2;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Successful two phase commit transaction.");
- (() => {
- startTwoPhaseCommitTransaction();
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.twoPhaseCommit.initiated += 1;
- expectedStats.commitTypes.twoPhaseCommit.successful += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
-
- // Remove the inserted documents.
- assert.commandWorked(sessionDB[collName].remove({_id: {$in: [-5, 5]}}));
- })();
-
- jsTest.log("Failed two phase commit transaction.");
- (() => {
- startTwoPhaseCommitTransaction();
-
- abortFromUnderneath(st, session);
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["NoSuchTransaction"] += 1;
- expectedStats.commitTypes.twoPhaseCommit.initiated += 1;
- expectedStats.totalParticipantsAtCommit += 2;
- // There are no implicit aborts after two phase commit, so the coordinator is targeted once.
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Recover successful commit result.");
- (() => {
- const recoveryToken = setUpTransactionToRecoverCommit({shouldCommit: true});
-
- assert.commandWorked(st.s.adminCommand({
- commitTransaction: 1,
- lsid: otherRouterSession.getSessionId(),
- txnNumber: otherRouterSession.getTxnNumber_forTesting(),
- autocommit: false, recoveryToken
- }));
-
- expectedStats.totalStarted += 1;
- expectedStats.totalCommitted += 1;
- expectedStats.commitTypes.recoverWithToken.initiated += 1;
- expectedStats.commitTypes.recoverWithToken.successful += 1;
- // The participant stats shouldn't increase if we're recovering commit.
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Recover failed commit result.");
- (() => {
- const recoveryToken = setUpTransactionToRecoverCommit({shouldCommit: false});
-
- assert.commandFailedWithCode(st.s.adminCommand({
- commitTransaction: 1,
- lsid: otherRouterSession.getSessionId(),
- txnNumber: otherRouterSession.getTxnNumber_forTesting(),
- autocommit: false, recoveryToken
- }),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalStarted += 1;
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["NoSuchTransaction"] += 1;
- expectedStats.commitTypes.recoverWithToken.initiated += 1;
- // The participant stats shouldn't increase if we're recovering commit.
- // There are no implicit aborts during commit recovery, so the recovery shard is targeted
- // once.
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Empty recovery token.");
- (() => {
- otherRouterSession.startTransaction();
- let resWithEmptyRecoveryToken =
- assert.commandWorked(otherRouterSessionDB.runCommand({find: collName}));
+function setUpTransactionToRecoverCommit({shouldCommit}) {
+ otherRouterSession.startTransaction();
+ let resWithRecoveryToken = assert.commandWorked(
+ otherRouterSessionDB.runCommand({insert: collName, documents: [{x: 5}]}));
+ if (shouldCommit) {
assert.commandWorked(otherRouterSession.commitTransaction_forTesting());
+ } else {
+ assert.commandWorked(otherRouterSession.abortTransaction_forTesting());
+ }
- // The stats on the main mongos shouldn't have changed.
- verifyServerStatusValues(st, expectedStats);
-
- assert.commandFailedWithCode(st.s.adminCommand({
- commitTransaction: 1,
- lsid: otherRouterSession.getSessionId(),
- txnNumber: otherRouterSession.getTxnNumber_forTesting(),
- autocommit: false,
- recoveryToken: resWithEmptyRecoveryToken.recoveryToken
- }),
- ErrorCodes.NoSuchTransaction);
-
- expectedStats.totalStarted += 1;
- expectedStats.commitTypes.recoverWithToken.initiated += 1;
- // No requests are targeted and the decision isn't learned, so total committed/aborted and
- // total requests sent shouldn't change.
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Explicitly aborted transaction.");
- (() => {
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({x: 2}));
-
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
-
- assert.commandWorked(session.abortTransaction_forTesting());
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["abort"] = 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Implicitly aborted transaction.");
- (() => {
- session.startTransaction();
- assert.commandFailedWithCode(sessionDB[collName].insert({_id: 1}), ErrorCodes.DuplicateKey);
-
- expectedStats.totalStarted += 1;
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["DuplicateKey"] = 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 2; // Plus one for the implicit abort.
- verifyServerStatusValues(st, expectedStats);
-
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // A failed abortTransaction leads to an implicit abort, so two requests are targeted.
- expectedStats.totalRequestsTargeted += 2;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- jsTest.log("Abandoned transaction.");
- (() => {
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].insert({_id: -15}));
-
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
-
- session.startTransaction_forTesting({}, {ignoreActiveTxn: true});
- assert.commandWorked(sessionDB[collName].insert({_id: -15}));
-
- expectedStats.totalStarted += 1;
- expectedStats.totalContactedParticipants += 1;
- expectedStats.totalRequestsTargeted += 1;
- // The router never learned if the previous transaction committed or aborted, so the aborted
- // counter shouldn't be incremented.
- verifyServerStatusValues(st, expectedStats);
-
- // Abort to clear the shell's session state.
- assert.commandWorked(session.abortTransaction_forTesting());
-
- expectedStats.totalAborted += 1;
- expectedStats.abortCause["abort"] += 1;
- expectedStats.totalRequestsTargeted += 1;
- verifyServerStatusValues(st, expectedStats);
- })();
-
- session.endSession();
- st.stop();
+ // The stats on the main mongos shouldn't have changed.
+ verifyServerStatusValues(st, expectedStats);
+
+ return resWithRecoveryToken.recoveryToken;
+}
+
+//
+// Test cases for serverStatus output.
+//
+
+jsTest.log("Default values.");
+(() => {
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+// Note that committing a transaction which targeted no shards can only succeed.
+jsTest.log("Committed no shards transaction.");
+(() => {
+ startNoShardsTransaction();
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.noShards.initiated += 1;
+ expectedStats.commitTypes.noShards.successful += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Successful single shard transaction.");
+(() => {
+ startSingleShardTransaction();
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.singleShard.initiated += 1;
+ expectedStats.commitTypes.singleShard.successful += 1;
+ expectedStats.totalParticipantsAtCommit += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Failed single shard transaction.");
+(() => {
+ startSingleShardTransaction();
+
+ abortFromUnderneath(st, session);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["NoSuchTransaction"] = 1;
+ expectedStats.commitTypes.singleShard.initiated += 1;
+ expectedStats.totalParticipantsAtCommit += 1;
+    // The one shard is targeted for the commit, then for the implicit abort.
+ expectedStats.totalRequestsTargeted += 1 + 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Successful single write shard transaction.");
+(() => {
+ startSingleWriteShardTransaction();
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.singleWriteShard.initiated += 1;
+ expectedStats.commitTypes.singleWriteShard.successful += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ expectedStats.totalRequestsTargeted += 2;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Failed single write shard transaction.");
+(() => {
+ startSingleWriteShardTransaction();
+
+ abortFromUnderneath(st, session);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["NoSuchTransaction"] += 1;
+ expectedStats.commitTypes.singleWriteShard.initiated += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ // In a single write shard commit, all read shards are committed first, then the
+ // write shards, so if committing on a read shard fails, the write shards aren't targeted.
+    // The implicit abort that follows will target all shards.
+ expectedStats.totalRequestsTargeted += 1 + 2;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Successful read only transaction.");
+(() => {
+ startReadOnlyTransaction();
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.readOnly.initiated += 1;
+ expectedStats.commitTypes.readOnly.successful += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ expectedStats.totalRequestsTargeted += 2;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Failed read only transaction.");
+(() => {
+ startReadOnlyTransaction();
+
+ abortFromUnderneath(st, session);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["NoSuchTransaction"] += 1;
+ expectedStats.commitTypes.readOnly.initiated += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+    // Both shards are targeted for the commit, then for the implicit abort.
+ expectedStats.totalRequestsTargeted += 2 + 2;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Successful two phase commit transaction.");
+(() => {
+ startTwoPhaseCommitTransaction();
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.twoPhaseCommit.initiated += 1;
+ expectedStats.commitTypes.twoPhaseCommit.successful += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+
+ // Remove the inserted documents.
+ assert.commandWorked(sessionDB[collName].remove({_id: {$in: [-5, 5]}}));
+})();
+
+jsTest.log("Failed two phase commit transaction.");
+(() => {
+ startTwoPhaseCommitTransaction();
+
+ abortFromUnderneath(st, session);
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["NoSuchTransaction"] += 1;
+ expectedStats.commitTypes.twoPhaseCommit.initiated += 1;
+ expectedStats.totalParticipantsAtCommit += 2;
+ // There are no implicit aborts after two phase commit, so the coordinator is targeted once.
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Recover successful commit result.");
+(() => {
+ const recoveryToken = setUpTransactionToRecoverCommit({shouldCommit: true});
+
+ assert.commandWorked(st.s.adminCommand({
+ commitTransaction: 1,
+ lsid: otherRouterSession.getSessionId(),
+ txnNumber: otherRouterSession.getTxnNumber_forTesting(),
+ autocommit: false,
+ recoveryToken
+ }));
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalCommitted += 1;
+ expectedStats.commitTypes.recoverWithToken.initiated += 1;
+ expectedStats.commitTypes.recoverWithToken.successful += 1;
+ // The participant stats shouldn't increase if we're recovering commit.
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Recover failed commit result.");
+(() => {
+ const recoveryToken = setUpTransactionToRecoverCommit({shouldCommit: false});
+
+ assert.commandFailedWithCode(st.s.adminCommand({
+ commitTransaction: 1,
+ lsid: otherRouterSession.getSessionId(),
+ txnNumber: otherRouterSession.getTxnNumber_forTesting(),
+ autocommit: false,
+ recoveryToken
+ }),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["NoSuchTransaction"] += 1;
+ expectedStats.commitTypes.recoverWithToken.initiated += 1;
+ // The participant stats shouldn't increase if we're recovering commit.
+ // There are no implicit aborts during commit recovery, so the recovery shard is targeted
+ // once.
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Empty recovery token.");
+(() => {
+ otherRouterSession.startTransaction();
+ let resWithEmptyRecoveryToken =
+ assert.commandWorked(otherRouterSessionDB.runCommand({find: collName}));
+ assert.commandWorked(otherRouterSession.commitTransaction_forTesting());
+
+ // The stats on the main mongos shouldn't have changed.
+ verifyServerStatusValues(st, expectedStats);
+
+ assert.commandFailedWithCode(st.s.adminCommand({
+ commitTransaction: 1,
+ lsid: otherRouterSession.getSessionId(),
+ txnNumber: otherRouterSession.getTxnNumber_forTesting(),
+ autocommit: false,
+ recoveryToken: resWithEmptyRecoveryToken.recoveryToken
+ }),
+ ErrorCodes.NoSuchTransaction);
+
+ expectedStats.totalStarted += 1;
+ expectedStats.commitTypes.recoverWithToken.initiated += 1;
+ // No requests are targeted and the decision isn't learned, so total committed/aborted and
+ // total requests sent shouldn't change.
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Explicitly aborted transaction.");
+(() => {
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({x: 2}));
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+
+ assert.commandWorked(session.abortTransaction_forTesting());
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["abort"] = 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Implicitly aborted transaction.");
+(() => {
+ session.startTransaction();
+ assert.commandFailedWithCode(sessionDB[collName].insert({_id: 1}), ErrorCodes.DuplicateKey);
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["DuplicateKey"] = 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 2; // Plus one for the implicit abort.
+ verifyServerStatusValues(st, expectedStats);
+
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ // A failed abortTransaction leads to an implicit abort, so two requests are targeted.
+ expectedStats.totalRequestsTargeted += 2;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+jsTest.log("Abandoned transaction.");
+(() => {
+ session.startTransaction();
+ assert.commandWorked(sessionDB[collName].insert({_id: -15}));
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+
+ session.startTransaction_forTesting({}, {ignoreActiveTxn: true});
+ assert.commandWorked(sessionDB[collName].insert({_id: -15}));
+
+ expectedStats.totalStarted += 1;
+ expectedStats.totalContactedParticipants += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ // The router never learned if the previous transaction committed or aborted, so the aborted
+ // counter shouldn't be incremented.
+ verifyServerStatusValues(st, expectedStats);
+
+ // Abort to clear the shell's session state.
+ assert.commandWorked(session.abortTransaction_forTesting());
+
+ expectedStats.totalAborted += 1;
+ expectedStats.abortCause["abort"] += 1;
+ expectedStats.totalRequestsTargeted += 1;
+ verifyServerStatusValues(st, expectedStats);
+})();
+
+session.endSession();
+st.stop();
}());
diff --git a/jstests/noPassthrough/server_read_concern_metrics.js b/jstests/noPassthrough/server_read_concern_metrics.js
index 0907138e0fd..b5c78c166fb 100644
--- a/jstests/noPassthrough/server_read_concern_metrics.js
+++ b/jstests/noPassthrough/server_read_concern_metrics.js
@@ -1,360 +1,358 @@
// Tests readConcern level metrics in the serverStatus output.
// @tags: [uses_transactions, requires_majority_read_concern]
(function() {
- "use strict";
+"use strict";
- // Verifies that the server status response has the fields that we expect.
- function verifyServerStatusFields(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("opReadConcernCounters"),
- "Expected the serverStatus response to have a 'opReadConcernCounters' field\n" +
- tojson(serverStatusResponse));
- assert(
- serverStatusResponse.opReadConcernCounters.hasOwnProperty("available"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'available' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- assert(
- serverStatusResponse.opReadConcernCounters.hasOwnProperty("linearizable"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'linearizable' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- assert(
- serverStatusResponse.opReadConcernCounters.hasOwnProperty("local"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'local' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- assert(
- serverStatusResponse.opReadConcernCounters.hasOwnProperty("majority"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'majority' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- assert(
- serverStatusResponse.opReadConcernCounters.hasOwnProperty("snapshot"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'snapshot' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- assert(serverStatusResponse.opReadConcernCounters.hasOwnProperty("none"),
- "The 'opReadConcernCounters' field in serverStatus did not have the 'none' field\n" +
- tojson(serverStatusResponse.opReadConcernCounters));
- }
+// Verifies that the server status response has the fields that we expect.
+function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("opReadConcernCounters"),
+ "Expected the serverStatus response to have a 'opReadConcernCounters' field\n" +
+ tojson(serverStatusResponse));
+ assert(
+ serverStatusResponse.opReadConcernCounters.hasOwnProperty("available"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'available' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+ assert(
+ serverStatusResponse.opReadConcernCounters.hasOwnProperty("linearizable"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'linearizable' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+ assert(serverStatusResponse.opReadConcernCounters.hasOwnProperty("local"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'local' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+ assert(serverStatusResponse.opReadConcernCounters.hasOwnProperty("majority"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'majority' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+ assert(serverStatusResponse.opReadConcernCounters.hasOwnProperty("snapshot"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'snapshot' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+ assert(serverStatusResponse.opReadConcernCounters.hasOwnProperty("none"),
+ "The 'opReadConcernCounters' field in serverStatus did not have the 'none' field\n" +
+ tojson(serverStatusResponse.opReadConcernCounters));
+}
- // Verifies that the given value of the server status response is incremented in the way
- // we expect.
- function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
- assert.eq(initialStats[valueName] + expectedIncrement,
- newStats[valueName],
- "expected " + valueName + " to increase by " + expectedIncrement +
- ", initialStats: " + tojson(initialStats) + ", newStats: " +
- tojson(newStats));
- }
+// Verifies that the given value of the server status response is incremented in the way
+// we expect.
+function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
+ assert.eq(initialStats[valueName] + expectedIncrement,
+ newStats[valueName],
+ "expected " + valueName + " to increase by " + expectedIncrement +
+ ", initialStats: " + tojson(initialStats) + ", newStats: " + tojson(newStats));
+}
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
- const dbName = "test";
- const collName = "server_read_concern_metrics";
- const testDB = primary.getDB(dbName);
- const testColl = testDB[collName];
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testColl.insert({_id: 0}));
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
+const dbName = "test";
+const collName = "server_read_concern_metrics";
+const testDB = primary.getDB(dbName);
+const testColl = testDB[collName];
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testColl.insert({_id: 0}));
- // Run an initial transaction to get config.transactions state into memory.
- session.startTransaction();
- assert.eq(sessionColl.find().itcount(), 1);
- assert.commandWorked(session.abortTransaction_forTesting());
+// Run an initial transaction to get config.transactions state into memory.
+session.startTransaction();
+assert.eq(sessionColl.find().itcount(), 1);
+assert.commandWorked(session.abortTransaction_forTesting());
- // Get initial serverStatus.
- let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
+// Get initial serverStatus.
+let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(serverStatus);
- // Run a find with no readConcern.
- assert.eq(testColl.find().itcount(), 1);
- let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
- serverStatus = newStatus;
+// Run a find with no readConcern.
+assert.eq(testColl.find().itcount(), 1);
+let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
+serverStatus = newStatus;
- // Run a find with a readConcern with no level.
- assert.commandWorked(
- testDB.runCommand({find: collName, readConcern: {afterClusterTime: Timestamp(1, 1)}}));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
- serverStatus = newStatus;
+// Run a find with a readConcern with no level.
+assert.commandWorked(
+ testDB.runCommand({find: collName, readConcern: {afterClusterTime: Timestamp(1, 1)}}));
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
+serverStatus = newStatus;
- // Run a legacy query.
- primary.forceReadMode("legacy");
- assert.eq(testColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
- primary.forceReadMode("commands");
- serverStatus = newStatus;
+// Run a legacy query.
+primary.forceReadMode("legacy");
+assert.eq(testColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
+primary.forceReadMode("commands");
+serverStatus = newStatus;
- // Run a find with a readConcern level available.
- assert.eq(testColl.find().readConcern("available").itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Run a find with a readConcern level available.
+assert.eq(testColl.find().readConcern("available").itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // Run a find with a readConcern level linearizable.
- assert.eq(testColl.find().readConcern("linearizable").itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Run a find with a readConcern level linearizable.
+assert.eq(testColl.find().readConcern("linearizable").itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // Run a find with a readConcern level local.
- assert.eq(testColl.find().readConcern("local").itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Run a find with a readConcern level local.
+assert.eq(testColl.find().readConcern("local").itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // Run a find with a readConcern level majority.
- assert.eq(testColl.find().readConcern("majority").itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Run a find with a readConcern level majority.
+assert.eq(testColl.find().readConcern("majority").itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // Run a find in a transaction with readConcern level snapshot.
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.eq(sessionColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- assert.commandWorked(session.abortTransaction_forTesting());
- serverStatus = newStatus;
+// Run a find in a transaction with readConcern level snapshot.
+session.startTransaction({readConcern: {level: "snapshot"}});
+assert.eq(sessionColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+assert.commandWorked(session.abortTransaction_forTesting());
+serverStatus = newStatus;
- // Run a find in a transaction with no specified readConcern level.
- session.startTransaction();
- assert.eq(sessionColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
- assert.commandWorked(session.abortTransaction_forTesting());
- serverStatus = newStatus;
+// Run a find in a transaction with no specified readConcern level.
+session.startTransaction();
+assert.eq(sessionColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 1);
+assert.commandWorked(session.abortTransaction_forTesting());
+serverStatus = newStatus;
- // Run a find in a transaction with readConcern level local.
- session.startTransaction({readConcern: {level: "local"}});
- assert.eq(sessionColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- assert.commandWorked(session.abortTransaction_forTesting());
- serverStatus = newStatus;
+// Run a find in a transaction with readConcern level local.
+session.startTransaction({readConcern: {level: "local"}});
+assert.eq(sessionColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+assert.commandWorked(session.abortTransaction_forTesting());
+serverStatus = newStatus;
- // Run a find in a transaction with readConcern level majority.
- session.startTransaction({readConcern: {level: "majority"}});
- assert.eq(sessionColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Run a find in a transaction with readConcern level majority.
+session.startTransaction({readConcern: {level: "majority"}});
+assert.eq(sessionColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // Run a second find in the same transaction. It will inherit the readConcern from the
- // transaction.
- assert.eq(sessionColl.find().itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- assert.commandWorked(session.abortTransaction_forTesting());
- serverStatus = newStatus;
+// Run a second find in the same transaction. It will inherit the readConcern from the
+// transaction.
+assert.eq(sessionColl.find().itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 1);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+assert.commandWorked(session.abortTransaction_forTesting());
+serverStatus = newStatus;
- // Aggregation does not count toward readConcern metrics. Aggregation is counted as a 'command'
- // in the 'opCounters' serverStatus section, and we only track the readConcern of queries
- // tracked in 'opCounters.query'.
- assert.eq(testColl.aggregate([]).itcount(), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// Aggregation does not count toward readConcern metrics. Aggregation is counted as a 'command'
+// in the 'opCounters' serverStatus section, and we only track the readConcern of queries
+// tracked in 'opCounters.query'.
+assert.eq(testColl.aggregate([]).itcount(), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // The count command does not count toward readConcern metrics. The count command is counted as
- // a 'command' in the 'opCounters' serverStatus section, and we only track the readConcern of
- // queries tracked in 'opCounters.query'.
- assert.eq(testColl.count({_id: 0}), 1);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- serverStatus = newStatus;
+// The count command does not count toward readConcern metrics. The count command is counted as
+// a 'command' in the 'opCounters' serverStatus section, and we only track the readConcern of
+// queries tracked in 'opCounters.query'.
+assert.eq(testColl.count({_id: 0}), 1);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+serverStatus = newStatus;
- // getMore does not count toward readConcern metrics. getMore inherits the readConcern of the
- // originating command. It is not counted in 'opCounters.query'.
- let res = assert.commandWorked(testDB.runCommand({find: collName, batchSize: 0}));
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- assert.commandWorked(testDB.runCommand({getMore: res.cursor.id, collection: collName}));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
- verifyServerStatusChange(
- serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
+// getMore does not count toward readConcern metrics. getMore inherits the readConcern of the
+// originating command. It is not counted in 'opCounters.query'.
+let res = assert.commandWorked(testDB.runCommand({find: collName, batchSize: 0}));
+serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+assert.commandWorked(testDB.runCommand({getMore: res.cursor.id, collection: collName}));
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "available", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "linearizable", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "local", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "majority", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "snapshot", 0);
+verifyServerStatusChange(
+ serverStatus.opReadConcernCounters, newStatus.opReadConcernCounters, "none", 0);
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
}());
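For reference, the repeated per-level opReadConcernCounters assertions above could be folded into one table-driven helper. The sketch below is not part of this patch; it only assumes the serverStatus shape the test already relies on, and the helper name assertReadConcernCounterDeltas is illustrative.

// Hypothetical helper: assert the delta of every readConcern counter between two
// serverStatus responses. Levels omitted from 'expected' are asserted unchanged.
function assertReadConcernCounterDeltas(before, after, expected) {
    const levels = ["available", "linearizable", "local", "majority", "snapshot", "none"];
    levels.forEach(function(level) {
        const increment = expected.hasOwnProperty(level) ? expected[level] : 0;
        assert.eq(before.opReadConcernCounters[level] + increment,
                  after.opReadConcernCounters[level],
                  "expected '" + level + "' counter to increase by " + increment);
    });
}
// Example usage, e.g. after a find with readConcern level "majority":
// assertReadConcernCounterDeltas(serverStatus, newStatus, {majority: 1});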
diff --git a/jstests/noPassthrough/server_transaction_metrics.js b/jstests/noPassthrough/server_transaction_metrics.js
index 4bd3c02c9d5..402e72da964 100644
--- a/jstests/noPassthrough/server_transaction_metrics.js
+++ b/jstests/noPassthrough/server_transaction_metrics.js
@@ -1,220 +1,202 @@
// Tests multi-document transactions metrics in the serverStatus output.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- // Verifies that the server status response has the fields that we expect.
- function verifyServerStatusFields(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("transactions"),
- "Expected the serverStatus response to have a 'transactions' field\n" +
- tojson(serverStatusResponse));
- assert(serverStatusResponse.transactions.hasOwnProperty("currentActive"),
- "The 'transactions' field in serverStatus did not have the 'currentActive' field\n" +
- tojson(serverStatusResponse.transactions));
- assert(
- serverStatusResponse.transactions.hasOwnProperty("currentInactive"),
- "The 'transactions' field in serverStatus did not have the 'currentInactive' field\n" +
- tojson(serverStatusResponse.transactions));
- assert(serverStatusResponse.transactions.hasOwnProperty("currentOpen"),
- "The 'transactions' field in serverStatus did not have the 'currentOpen' field\n" +
- tojson(serverStatusResponse.transactions));
- assert(serverStatusResponse.transactions.hasOwnProperty("totalAborted"),
- "The 'transactions' field in serverStatus did not have the 'totalAborted' field\n" +
- tojson(serverStatusResponse.transactions));
- assert(
- serverStatusResponse.transactions.hasOwnProperty("totalCommitted"),
- "The 'transactions' field in serverStatus did not have the 'totalCommitted' field\n" +
- tojson(serverStatusResponse.transactions));
- assert(serverStatusResponse.transactions.hasOwnProperty("totalStarted"),
- "The 'transactions' field in serverStatus did not have the 'totalStarted' field\n" +
- tojson(serverStatusResponse.transactions));
- }
-
- // Verifies that the given value of the server status response is incremented in the way
- // we expect.
- function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
- assert.eq(initialStats[valueName] + expectedIncrement,
- newStats[valueName],
- "expected " + valueName + " to increase by " + expectedIncrement);
- }
-
- // Set up the replica set.
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
-
- // Set up the test database.
- const dbName = "test";
- const collName = "server_transactions_metrics";
- const testDB = primary.getDB(dbName);
- const adminDB = rst.getPrimary().getDB('admin');
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Start the session.
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
+"use strict";
+
+// Verifies that the server status response has the fields that we expect.
+function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("transactions"),
+ "Expected the serverStatus response to have a 'transactions' field\n" +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.transactions.hasOwnProperty("currentActive"),
+ "The 'transactions' field in serverStatus did not have the 'currentActive' field\n" +
+ tojson(serverStatusResponse.transactions));
+ assert(serverStatusResponse.transactions.hasOwnProperty("currentInactive"),
+ "The 'transactions' field in serverStatus did not have the 'currentInactive' field\n" +
+ tojson(serverStatusResponse.transactions));
+ assert(serverStatusResponse.transactions.hasOwnProperty("currentOpen"),
+ "The 'transactions' field in serverStatus did not have the 'currentOpen' field\n" +
+ tojson(serverStatusResponse.transactions));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalAborted"),
+ "The 'transactions' field in serverStatus did not have the 'totalAborted' field\n" +
+ tojson(serverStatusResponse.transactions));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalCommitted"),
+ "The 'transactions' field in serverStatus did not have the 'totalCommitted' field\n" +
+ tojson(serverStatusResponse.transactions));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalStarted"),
+ "The 'transactions' field in serverStatus did not have the 'totalStarted' field\n" +
+ tojson(serverStatusResponse.transactions));
+}
+
+// Verifies that the given value of the server status response is incremented in the way
+// we expect.
+function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
+ assert.eq(initialStats[valueName] + expectedIncrement,
+ newStats[valueName],
+ "expected " + valueName + " to increase by " + expectedIncrement);
+}
+
+// Set up the replica set.
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
+
+// Set up the test database.
+const dbName = "test";
+const collName = "server_transactions_metrics";
+const testDB = primary.getDB(dbName);
+const adminDB = rst.getPrimary().getDB('admin');
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Start the session.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+// Get state of server status before the transaction.
+let initialStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(initialStatus);
+
+// This transaction will commit.
+jsTest.log("Start a transaction and then commit it.");
+
+// Compare server status after starting a transaction with the server status before.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
+
+let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+// Verify that the open transaction counter is incremented while inside the transaction.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
+// Verify that when not running an operation, the transaction is inactive.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
+
+// Compare server status after the transaction commit with the server status before.
+assert.commandWorked(session.commitTransaction_forTesting());
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
+// Verify that the current open counter is decremented on commit.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
+// Verify that both active and inactive are 0 on commit.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
+
+// This transaction will abort.
+jsTest.log("Start a transaction and then abort it.");
+
+// Compare server status after starting a transaction with the server status before.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
+
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+// Verify that the open transaction counter is incremented while inside the transaction.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
+// Verify that when not running an operation, the transaction is inactive.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
+
+// Compare server status after the transaction abort with the server status before.
+assert.commandWorked(session.abortTransaction_forTesting());
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 2);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalAborted", 1);
+// Verify that the current open counter is decremented on abort.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
+// Verify that both active and inactive are 0 on abort.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
+
+// This transaction will abort due to a duplicate key insert.
+jsTest.log("Start a transaction that will abort on a duplicated key error.");
+
+// Compare server status after starting a transaction with the server status before.
+session.startTransaction();
+// Inserting a new document will work fine, and the transaction starts.
+assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
+
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+// Verify that the open transaction counter is incremented while inside the transaction.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
+// Verify that when not running an operation, the transaction is inactive.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
+
+// Compare server status after the transaction abort with the server status before.
+// The duplicate insert will fail, causing the transaction to abort.
+assert.commandFailedWithCode(sessionColl.insert({_id: "insert-3"}), ErrorCodes.DuplicateKey);
+// Ensure that the transaction was aborted on failure.
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 3);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalAborted", 2);
+// Verify that the current open counter is decremented on abort caused by an error.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
+// Verify that both active and inactive are 0 on abort.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
+
+// Hang the transaction on a failpoint in the middle of an operation to check active and
+// inactive counters while an operation is running inside a transaction.
+jsTest.log("Start a transaction that will hang in the middle of an operation due to a fail point.");
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'alwaysOn'}));
+
+const transactionFn = function() {
+ const collName = 'server_transactions_metrics';
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDb = session.getDatabase('test');
const sessionColl = sessionDb[collName];
- // Get state of server status before the transaction.
- let initialStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(initialStatus);
-
- // This transaction will commit.
- jsTest.log("Start a transaction and then commit it.");
-
- // Compare server status after starting a transaction with the server status before.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-1"}));
-
- let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- // Verify that the open transaction counter is incremented while inside the transaction.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
- // Verify that when not running an operation, the transaction is inactive.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
-
- // Compare server status after the transaction commit with the server status before.
+ session.startTransaction({readConcern: {level: 'snapshot'}});
+ assert.commandWorked(sessionColl.update({}, {"update-1": 2}));
assert.commandWorked(session.commitTransaction_forTesting());
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
- // Verify that current open counter is decremented on commit.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
- // Verify that both active and inactive are 0 on commit.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
-
- // This transaction will abort.
- jsTest.log("Start a transaction and then abort it.");
-
- // Compare server status after starting a transaction with the server status before.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "insert-2"}));
-
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- // Verify that the open transaction counter is incremented while inside the transaction.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
- // Verify that when not running an operation, the transaction is inactive.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
-
- // Compare server status after the transaction abort with the server status before.
- assert.commandWorked(session.abortTransaction_forTesting());
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 2);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalAborted", 1);
- // Verify that current open counter is decremented on abort.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
- // Verify that both active and inactive are 0 on abort.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
-
- // This transaction will abort due to a duplicate key insert.
- jsTest.log("Start a transaction that will abort on a duplicated key error.");
-
- // Compare server status after starting a transaction with the server status before.
- session.startTransaction();
- // Inserting a new document will work fine, and the transaction starts.
- assert.commandWorked(sessionColl.insert({_id: "insert-3"}));
-
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- // Verify that the open transaction counter is incremented while inside the transaction.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
- // Verify that when not running an operation, the transaction is inactive.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 1);
-
- // Compare server status after the transaction abort with the server status before.
- // The duplicated insert will fail, causing the transaction to abort.
- assert.commandFailedWithCode(sessionColl.insert({_id: "insert-3"}), ErrorCodes.DuplicateKey);
- // Ensure that the transaction was aborted on failure.
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalStarted", 3);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalCommitted", 1);
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalAborted", 2);
- // Verify that current open counter is decremented on abort caused by an error.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
- // Verify that both active and inactive are 0 on abort.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
-
- // Hang the transaction on a failpoint in the middle of an operation to check active and
- // inactive counters while operation is running inside a transaction.
- jsTest.log(
- "Start a transaction that will hang in the middle of an operation due to a fail point.");
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'alwaysOn'}));
-
- const transactionFn = function() {
- const collName = 'server_transactions_metrics';
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDb = session.getDatabase('test');
- const sessionColl = sessionDb[collName];
-
- session.startTransaction({readConcern: {level: 'snapshot'}});
- assert.commandWorked(sessionColl.update({}, {"update-1": 2}));
- assert.commandWorked(session.commitTransaction_forTesting());
+};
+const transactionProcess = startParallelShell(transactionFn, primary.port);
+
+// Keep running currentOp() until we see the transaction subdocument.
+assert.soon(function() {
+ const transactionFilter = {
+ active: true,
+ 'lsid': {$exists: true},
+ 'transaction.parameters.txnNumber': {$eq: 0}
};
- const transactionProcess = startParallelShell(transactionFn, primary.port);
-
- // Keep running currentOp() until we see the transaction subdocument.
- assert.soon(function() {
- const transactionFilter =
- {active: true, 'lsid': {$exists: true}, 'transaction.parameters.txnNumber': {$eq: 0}};
- return 1 === adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).itcount();
- });
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- // Verify that the open transaction counter is incremented while inside the transaction.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
- // Verify that the metrics show that the transaction is active while inside the operation.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
-
- // Now the transaction can proceed.
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'off'}));
- transactionProcess();
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- // Verify that current open counter is decremented on commit.
- verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
- // Verify that both active and inactive are 0 after the transaction finishes.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentActive", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
-
- // End the session and stop the replica set.
- session.endSession();
- rst.stopSet();
+ return 1 === adminDB.aggregate([{$currentOp: {}}, {$match: transactionFilter}]).itcount();
+});
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+// Verify that the open transaction counter is incremented while inside the transaction.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 1);
+// Verify that the metrics show that the transaction is active while inside the operation.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
+
+// Now the transaction can proceed.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'hangDuringBatchUpdate', mode: 'off'}));
+transactionProcess();
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+// Verify that the current open counter is decremented on commit.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentOpen", 0);
+// Verify that both active and inactive are 0 after the transaction finishes.
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentActive", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentInactive", 0);
+
+// End the session and stop the replica set.
+session.endSession();
+rst.stopSet();
}());
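The failpoint-based portion of this test finds the hung transaction through a collection-less $currentOp aggregation. Below is a minimal sketch of that wait loop, not part of this patch, assuming 'adminDB' is a handle to the admin database; the helper name waitForOpenTransaction is illustrative.

// Hypothetical helper: block until exactly one active transaction with the given
// txnNumber appears in the $currentOp output.
function waitForOpenTransaction(adminDB, txnNumber) {
    assert.soon(function() {
        const filter = {
            active: true,
            lsid: {$exists: true},
            'transaction.parameters.txnNumber': {$eq: txnNumber}
        };
        return adminDB.aggregate([{$currentOp: {}}, {$match: filter}]).itcount() === 1;
    }, "timed out waiting for an active transaction with txnNumber " + txnNumber);
}
// Example usage: waitForOpenTransaction(adminDB, 0);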
diff --git a/jstests/noPassthrough/server_transaction_metrics_for_prepared_transactions.js b/jstests/noPassthrough/server_transaction_metrics_for_prepared_transactions.js
index 172e9e3e5a9..a41e66dfc2d 100644
--- a/jstests/noPassthrough/server_transaction_metrics_for_prepared_transactions.js
+++ b/jstests/noPassthrough/server_transaction_metrics_for_prepared_transactions.js
@@ -3,175 +3,175 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/rslib.js");
-
- /**
- * Verifies that the serverStatus response has the fields that we expect.
- */
- function verifyServerStatusFields(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("transactions"),
- "Expected the serverStatus response to have a 'transactions' field: " +
- tojson(serverStatusResponse));
- assert(serverStatusResponse.transactions.hasOwnProperty("totalPrepared"),
- "Expected the serverStatus response to have a 'totalPrepared' field: " +
- tojson(serverStatusResponse));
- assert(serverStatusResponse.transactions.hasOwnProperty("totalPreparedThenCommitted"),
- "Expected the serverStatus response to have a 'totalPreparedThenCommitted' field: " +
- tojson(serverStatusResponse));
- assert(serverStatusResponse.transactions.hasOwnProperty("totalPreparedThenAborted"),
- "Expected the serverStatus response to have a 'totalPreparedThenAborted' field: " +
- tojson(serverStatusResponse));
- assert(serverStatusResponse.transactions.hasOwnProperty("currentPrepared"),
- "Expected the serverStatus response to have a 'currentPrepared' field: " +
- tojson(serverStatusResponse));
- }
-
- /**
- * Verifies that the given value of the server status response is incremented in the way
- * we expect.
- */
- function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
- assert.eq(initialStats[valueName] + expectedIncrement,
- newStats[valueName],
- "expected " + valueName + " to increase by " + expectedIncrement);
- }
-
- /**
- * Verifies that the timestamp of the oldest active transaction in the transactions table
- * is greater than the lower bound and less than or equal to the upper bound.
- */
- function verifyOldestActiveTransactionTimestamp(testDB, lowerBound, upperBound) {
- let res = assert.commandWorked(
- testDB.getSiblingDB("config").getCollection("transactions").runCommand("find", {
- "filter": {"state": {"$in": ["prepared", "inProgress"]}},
- "sort": {"startOpTime": 1},
- "readConcern": {"level": "local"},
- "limit": 1
- }));
-
- let entry = res.cursor.firstBatch[0];
- assert.neq(undefined, entry);
-
- assert.lt(lowerBound,
- entry.startOpTime.ts,
- "oldest active transaction timestamp should be greater than the lower bound");
- assert.lte(
- entry.startOpTime.ts,
- upperBound,
- "oldest active transaction timestamp should be less than or equal to the upper bound");
- }
-
- // Set up the replica set.
- const rst = new ReplSetTest({nodes: 1});
-
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
-
- // Set up the test database.
- const dbName = "test";
- const collName = "server_transactions_metrics_for_prepared_transactions";
- const testDB = primary.getDB(dbName);
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Start the session.
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
-
- // Get state of server status before the transaction.
- const initialStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(initialStatus);
-
- // Test server metrics for a prepared transaction that is committed.
- jsTest.log("Prepare a transaction and then commit it");
-
- const doc1 = {_id: 1, x: 1};
-
- // Start transaction and prepare transaction.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc1));
-
- const opTimeBeforePrepareForCommit = getLastOpTime(primary);
- const prepareTimestampForCommit = PrepareHelpers.prepareTransaction(session);
-
- // Verify the total and current prepared transaction counter is updated and the oldest active
- // oplog entry timestamp is shown.
- let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPrepared", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentPrepared", 1);
-
- // Verify that the prepare entry has the oldest timestamp of any active transaction
- // in the transactions table.
- verifyOldestActiveTransactionTimestamp(
- testDB, opTimeBeforePrepareForCommit.ts, prepareTimestampForCommit);
-
- // Verify the total prepared and committed transaction counters are updated after a commit
- // and that the current prepared counter is decremented.
- PrepareHelpers.commitTransaction(session, prepareTimestampForCommit);
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPreparedThenCommitted", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentPrepared", 0);
-
- // Verify that other prepared transaction metrics have not changed.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPreparedThenAborted", 0);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPrepared", 1);
-
- // Test server metrics for a prepared transaction that is aborted.
- jsTest.log("Prepare a transaction and then abort it");
-
- const doc2 = {_id: 2, x: 2};
-
- // Start transaction and prepare transaction.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(doc2));
-
- const opTimeBeforePrepareForAbort = getLastOpTime(primary);
- const prepareTimestampForAbort = PrepareHelpers.prepareTransaction(session);
-
- // Verify that the total and current prepared counter is updated and the oldest active oplog
- // entry timestamp is shown.
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPrepared", 2);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentPrepared", 1);
-
- // Verify that the prepare entry has the oldest timestamp of any active transaction
- // in the transactions table.
- verifyOldestActiveTransactionTimestamp(
- testDB, opTimeBeforePrepareForAbort.ts, prepareTimestampForAbort);
-
- // Verify the total prepared and aborted transaction counters are updated after an abort and the
- // current prepared counter is decremented.
- assert.commandWorked(session.abortTransaction_forTesting());
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(newStatus);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPreparedThenAborted", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "currentPrepared", 0);
-
- // Verify that other prepared transaction metrics have not changed.
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPreparedThenCommitted", 1);
- verifyServerStatusChange(
- initialStatus.transactions, newStatus.transactions, "totalPrepared", 2);
-
- // End the session and stop the replica set.
- session.endSession();
- rst.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/rslib.js");
+
+/**
+ * Verifies that the serverStatus response has the fields that we expect.
+ */
+function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("transactions"),
+ "Expected the serverStatus response to have a 'transactions' field: " +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalPrepared"),
+ "Expected the serverStatus response to have a 'totalPrepared' field: " +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalPreparedThenCommitted"),
+ "Expected the serverStatus response to have a 'totalPreparedThenCommitted' field: " +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.transactions.hasOwnProperty("totalPreparedThenAborted"),
+ "Expected the serverStatus response to have a 'totalPreparedThenAborted' field: " +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.transactions.hasOwnProperty("currentPrepared"),
+ "Expected the serverStatus response to have a 'currentPrepared' field: " +
+ tojson(serverStatusResponse));
+}
+
+/**
+ * Verifies that the given value of the server status response is incremented in the way
+ * we expect.
+ */
+function verifyServerStatusChange(initialStats, newStats, valueName, expectedIncrement) {
+ assert.eq(initialStats[valueName] + expectedIncrement,
+ newStats[valueName],
+ "expected " + valueName + " to increase by " + expectedIncrement);
+}
+
+/**
+ * Verifies that the timestamp of the oldest active transaction in the transactions table
+ * is greater than the lower bound and less than or equal to the upper bound.
+ */
+function verifyOldestActiveTransactionTimestamp(testDB, lowerBound, upperBound) {
+ let res = assert.commandWorked(
+ testDB.getSiblingDB("config").getCollection("transactions").runCommand("find", {
+ "filter": {"state": {"$in": ["prepared", "inProgress"]}},
+ "sort": {"startOpTime": 1},
+ "readConcern": {"level": "local"},
+ "limit": 1
+ }));
+
+ let entry = res.cursor.firstBatch[0];
+ assert.neq(undefined, entry);
+
+ assert.lt(lowerBound,
+ entry.startOpTime.ts,
+ "oldest active transaction timestamp should be greater than the lower bound");
+ assert.lte(
+ entry.startOpTime.ts,
+ upperBound,
+ "oldest active transaction timestamp should be less than or equal to the upper bound");
+}
+
+// Set up the replica set.
+const rst = new ReplSetTest({nodes: 1});
+
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
+
+// Set up the test database.
+const dbName = "test";
+const collName = "server_transactions_metrics_for_prepared_transactions";
+const testDB = primary.getDB(dbName);
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Start the session.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+
+// Get state of server status before the transaction.
+const initialStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(initialStatus);
+
+// Test server metrics for a prepared transaction that is committed.
+jsTest.log("Prepare a transaction and then commit it");
+
+const doc1 = {
+ _id: 1,
+ x: 1
+};
+
+// Start transaction and prepare transaction.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc1));
+
+const opTimeBeforePrepareForCommit = getLastOpTime(primary);
+const prepareTimestampForCommit = PrepareHelpers.prepareTransaction(session);
+
+// Verify the total and current prepared transaction counters are updated and the oldest active
+// oplog entry timestamp is shown.
+let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalPrepared", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentPrepared", 1);
+
+// Verify that the prepare entry has the oldest timestamp of any active transaction
+// in the transactions table.
+verifyOldestActiveTransactionTimestamp(
+ testDB, opTimeBeforePrepareForCommit.ts, prepareTimestampForCommit);
+
+// Verify the total prepared and committed transaction counters are updated after a commit
+// and that the current prepared counter is decremented.
+PrepareHelpers.commitTransaction(session, prepareTimestampForCommit);
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ initialStatus.transactions, newStatus.transactions, "totalPreparedThenCommitted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentPrepared", 0);
+
+// Verify that other prepared transaction metrics have not changed.
+verifyServerStatusChange(
+ initialStatus.transactions, newStatus.transactions, "totalPreparedThenAborted", 0);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalPrepared", 1);
+
+// Test server metrics for a prepared transaction that is aborted.
+jsTest.log("Prepare a transaction and then abort it");
+
+const doc2 = {
+ _id: 2,
+ x: 2
+};
+
+// Start transaction and prepare transaction.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(doc2));
+
+const opTimeBeforePrepareForAbort = getLastOpTime(primary);
+const prepareTimestampForAbort = PrepareHelpers.prepareTransaction(session);
+
+// Verify that the total and current prepared counters are updated and the oldest active oplog
+// entry timestamp is shown.
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalPrepared", 2);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentPrepared", 1);
+
+// Verify that the prepare entry has the oldest timestamp of any active transaction
+// in the transactions table.
+verifyOldestActiveTransactionTimestamp(
+ testDB, opTimeBeforePrepareForAbort.ts, prepareTimestampForAbort);
+
+// Verify the total prepared and aborted transaction counters are updated after an abort and the
+// current prepared counter is decremented.
+assert.commandWorked(session.abortTransaction_forTesting());
+newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+verifyServerStatusFields(newStatus);
+verifyServerStatusChange(
+ initialStatus.transactions, newStatus.transactions, "totalPreparedThenAborted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "currentPrepared", 0);
+
+// Verify that other prepared transaction metrics have not changed.
+verifyServerStatusChange(
+ initialStatus.transactions, newStatus.transactions, "totalPreparedThenCommitted", 1);
+verifyServerStatusChange(initialStatus.transactions, newStatus.transactions, "totalPrepared", 2);
+
+// End the session and stop the replica set.
+session.endSession();
+rst.stopSet();
}());
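The oldest-active-transaction check in this test reduces to a single find against config.transactions, sorted by startOpTime. Below is a minimal sketch of that query, not part of this patch, with 'testDB' assumed to be any database handle on the primary; the helper name oldestActiveTransactionEntry is illustrative.

// Hypothetical helper: return the config.transactions entry for the oldest active
// (prepared or in-progress) transaction, or undefined if there is none.
function oldestActiveTransactionEntry(testDB) {
    const res = assert.commandWorked(
        testDB.getSiblingDB("config").getCollection("transactions").runCommand("find", {
            filter: {state: {$in: ["prepared", "inProgress"]}},
            sort: {startOpTime: 1},
            readConcern: {level: "local"},
            limit: 1
        }));
    return res.cursor.firstBatch[0];
}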
diff --git a/jstests/noPassthrough/server_transaction_metrics_kill_sessions.js b/jstests/noPassthrough/server_transaction_metrics_kill_sessions.js
index 3b3fd3ec94b..a4f7aba5a08 100644
--- a/jstests/noPassthrough/server_transaction_metrics_kill_sessions.js
+++ b/jstests/noPassthrough/server_transaction_metrics_kill_sessions.js
@@ -1,82 +1,83 @@
// Tests multi-document transactions metrics are still correct after 'killSessions'.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- // Verifies that the given value of the transaction metrics is incremented in the way we expect.
- function verifyMetricsChange(initialStats, newStats, valueName, expectedIncrement) {
- assert.eq(initialStats[valueName] + expectedIncrement,
- newStats[valueName],
- "expected " + valueName + " to increase by " + expectedIncrement +
- ".\nInitial stats: " + tojson(initialStats) + "; New stats: " +
- tojson(newStats));
- }
-
- // Set up the replica set and enable majority read concern for atClusterTime snapshot reads.
- const rst = new ReplSetTest({nodes: 1, nodeOptions: {enableMajorityReadConcern: "true"}});
- rst.startSet();
- rst.initiate();
-
- const dbName = "test";
- const collName = "server_transactions_metrics_kill_sessions";
- const testDB = rst.getPrimary().getDB(dbName);
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const sessionOptions = {causalConsistency: false};
- let session = testDB.getMongo().startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
-
- let initialMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
-
- jsTest.log("Start a transaction.");
- session.startTransaction();
- assert.commandWorked(sessionDb.runCommand({find: collName}));
-
- let newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
- verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 1);
- verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 1);
-
- jsTest.log("Kill session " + tojson(session.getSessionId()) + ".");
- assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
-
- newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
- verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "totalCommitted", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "totalAborted", 1);
- verifyMetricsChange(initialMetrics, newMetrics, "totalStarted", 1);
-
- session.endSession();
-
- session = testDB.getMongo().startSession(sessionOptions);
- sessionDb = session.getDatabase(dbName);
-
- jsTest.log("Start a snapshot transaction at a time that is too old.");
- session.startTransaction({readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)}});
- // Operation runs unstashTransactionResources() and throws prior to onUnstash(). As a result,
- // the transaction will be implicitly aborted.
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.SnapshotTooOld);
-
- newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
- verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
-
- // Kill the session that threw exception before.
- jsTest.log("Kill session " + tojson(session.getSessionId()) + ".");
- assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
-
- newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
- verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "totalCommitted", 0);
- verifyMetricsChange(initialMetrics, newMetrics, "totalAborted", 2);
- verifyMetricsChange(initialMetrics, newMetrics, "totalStarted", 2);
-
- session.endSession();
-
- rst.stopSet();
+"use strict";
+
+// Verifies that the given value of the transaction metrics is incremented in the way we expect.
+function verifyMetricsChange(initialStats, newStats, valueName, expectedIncrement) {
+ assert.eq(initialStats[valueName] + expectedIncrement,
+ newStats[valueName],
+ "expected " + valueName + " to increase by " + expectedIncrement +
+ ".\nInitial stats: " + tojson(initialStats) + "; New stats: " + tojson(newStats));
+}
+
+// Set up the replica set and enable majority read concern for atClusterTime snapshot reads.
+const rst = new ReplSetTest({nodes: 1, nodeOptions: {enableMajorityReadConcern: "true"}});
+rst.startSet();
+rst.initiate();
+
+const dbName = "test";
+const collName = "server_transactions_metrics_kill_sessions";
+const testDB = rst.getPrimary().getDB(dbName);
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = testDB.getMongo().startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+
+let initialMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
+
+jsTest.log("Start a transaction.");
+session.startTransaction();
+assert.commandWorked(sessionDb.runCommand({find: collName}));
+
+let newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
+verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 1);
+verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 1);
+
+jsTest.log("Kill session " + tojson(session.getSessionId()) + ".");
+assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
+
+newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
+verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "totalCommitted", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "totalAborted", 1);
+verifyMetricsChange(initialMetrics, newMetrics, "totalStarted", 1);
+
+session.endSession();
+
+session = testDB.getMongo().startSession(sessionOptions);
+sessionDb = session.getDatabase(dbName);
+
+jsTest.log("Start a snapshot transaction at a time that is too old.");
+session.startTransaction({readConcern: {level: "snapshot", atClusterTime: Timestamp(1, 1)}});
+// Operation runs unstashTransactionResources() and throws prior to onUnstash(). As a result,
+// the transaction will be implicitly aborted.
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.SnapshotTooOld);
+
+newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
+verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
+
+// Kill the session that threw an exception earlier.
+jsTest.log("Kill session " + tojson(session.getSessionId()) + ".");
+assert.commandWorked(testDB.runCommand({killSessions: [session.getSessionId()]}));
+
+newMetrics = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).transactions;
+verifyMetricsChange(initialMetrics, newMetrics, "currentActive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentInactive", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "currentOpen", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "totalCommitted", 0);
+verifyMetricsChange(initialMetrics, newMetrics, "totalAborted", 2);
+verifyMetricsChange(initialMetrics, newMetrics, "totalStarted", 2);
+
+session.endSession();
+
+rst.stopSet();
}());
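The assertions above all follow the same capture, operate, compare pattern against the transactions section of serverStatus. A minimal sketch of that pattern, assuming a connection db to the primary as in the test (the names before and after are illustrative only):

// Capture the transaction metrics, run the workload under test, then assert on the delta.
const before = assert.commandWorked(db.adminCommand({serverStatus: 1})).transactions;
// ... start a transaction and kill its session, as the test does above ...
const after = assert.commandWorked(db.adminCommand({serverStatus: 1})).transactions;
// Killing the session aborts the open transaction, so totalStarted and totalAborted each grow
// by one while currentOpen returns to its previous value.
assert.eq(before.totalStarted + 1, after.totalStarted);
assert.eq(before.totalAborted + 1, after.totalAborted);
assert.eq(before.currentOpen, after.currentOpen);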
diff --git a/jstests/noPassthrough/server_transaction_metrics_secondary.js b/jstests/noPassthrough/server_transaction_metrics_secondary.js
index 0f28b9e5667..9464dd77fc1 100644
--- a/jstests/noPassthrough/server_transaction_metrics_secondary.js
+++ b/jstests/noPassthrough/server_transaction_metrics_secondary.js
@@ -1,79 +1,80 @@
// Test that transactions run on secondaries do not change the serverStatus transaction metrics.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- jsTest.setOption("enableTestCommands", false);
- TestData.authenticationDatabase = "local";
+jsTest.setOption("enableTestCommands", false);
+TestData.authenticationDatabase = "local";
- const dbName = "test";
- const collName = "server_transaction_metrics_secondary";
+const dbName = "test";
+const collName = "server_transaction_metrics_secondary";
- // Start up the replica set. We want a stable topology, so make the secondary unelectable.
- const replTest = new ReplSetTest({name: collName, nodes: 2});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[1].priority = 0;
- replTest.initiate(config);
+// Start up the replica set. We want a stable topology, so make the secondary unelectable.
+const replTest = new ReplSetTest({name: collName, nodes: 2});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[1].priority = 0;
+replTest.initiate(config);
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
- // Set slaveOk=true so that normal read commands would be allowed on the secondary.
- secondary.setSlaveOk(true);
+// Set slaveOk=true so that normal read commands would be allowed on the secondary.
+secondary.setSlaveOk(true);
- // Create a test collection that we can run commands against.
- assert.commandWorked(primary.getDB(dbName)[collName].insert({_id: 0}));
- replTest.awaitLastOpCommitted();
+// Create a test collection that we can run commands against.
+assert.commandWorked(primary.getDB(dbName)[collName].insert({_id: 0}));
+replTest.awaitLastOpCommitted();
- // Initiate a session on the secondary.
- const sessionOptions = {causalConsistency: false};
- const secondarySession = secondary.getDB(dbName).getMongo().startSession(sessionOptions);
- let secDb = secondarySession.getDatabase(dbName);
- let metrics;
+// Initiate a session on the secondary.
+const sessionOptions = {
+ causalConsistency: false
+};
+const secondarySession = secondary.getDB(dbName).getMongo().startSession(sessionOptions);
+let secDb = secondarySession.getDatabase(dbName);
+let metrics;
- jsTestLog("Trying to start transaction on secondary.");
- secondarySession.startTransaction();
+jsTestLog("Trying to start transaction on secondary.");
+secondarySession.startTransaction();
- // Initially there are no transactions in the system.
- metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
- .transactions;
- assert.eq(0, metrics.currentActive);
- assert.eq(0, metrics.currentInactive);
- assert.eq(0, metrics.currentOpen);
- assert.eq(0, metrics.totalAborted);
- assert.eq(0, metrics.totalCommitted);
- assert.eq(0, metrics.totalStarted);
+// Initially there are no transactions in the system.
+metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
+ .transactions;
+assert.eq(0, metrics.currentActive);
+assert.eq(0, metrics.currentInactive);
+assert.eq(0, metrics.currentOpen);
+assert.eq(0, metrics.totalAborted);
+assert.eq(0, metrics.totalCommitted);
+assert.eq(0, metrics.totalStarted);
- jsTestLog("Run transaction statement.");
- assert.eq(assert.throws(() => secDb[collName].findOne({_id: 0})).code, ErrorCodes.NotMaster);
+jsTestLog("Run transaction statement.");
+assert.eq(assert.throws(() => secDb[collName].findOne({_id: 0})).code, ErrorCodes.NotMaster);
- // The metrics are not affected.
- metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
- .transactions;
- assert.eq(0, metrics.currentActive);
- assert.eq(0, metrics.currentInactive);
- assert.eq(0, metrics.currentOpen);
- assert.eq(0, metrics.totalAborted);
- assert.eq(0, metrics.totalCommitted);
- assert.eq(0, metrics.totalStarted);
+// The metrics are not affected.
+metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
+ .transactions;
+assert.eq(0, metrics.currentActive);
+assert.eq(0, metrics.currentInactive);
+assert.eq(0, metrics.currentOpen);
+assert.eq(0, metrics.totalAborted);
+assert.eq(0, metrics.totalCommitted);
+assert.eq(0, metrics.totalStarted);
- jsTestLog("Abort the transaction.");
- assert.commandFailedWithCode(secondarySession.abortTransaction_forTesting(),
- ErrorCodes.NotMaster);
+jsTestLog("Abort the transaction.");
+assert.commandFailedWithCode(secondarySession.abortTransaction_forTesting(), ErrorCodes.NotMaster);
- // The metrics are not affected.
- metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
- .transactions;
- assert.eq(0, metrics.currentActive);
- assert.eq(0, metrics.currentInactive);
- assert.eq(0, metrics.currentOpen);
- assert.eq(0, metrics.totalAborted);
- assert.eq(0, metrics.totalCommitted);
- assert.eq(0, metrics.totalStarted);
+// The metrics are not affected.
+metrics = assert.commandWorked(secondary.adminCommand({serverStatus: 1, repl: 0, metrics: 0}))
+ .transactions;
+assert.eq(0, metrics.currentActive);
+assert.eq(0, metrics.currentInactive);
+assert.eq(0, metrics.currentOpen);
+assert.eq(0, metrics.totalAborted);
+assert.eq(0, metrics.totalCommitted);
+assert.eq(0, metrics.totalStarted);
- jsTestLog("Done trying transaction on secondary.");
- secondarySession.endSession();
+jsTestLog("Done trying transaction on secondary.");
+secondarySession.endSession();
- replTest.stopSet();
+replTest.stopSet();
}());
\ No newline at end of file
diff --git a/jstests/noPassthrough/server_write_concern_metrics.js b/jstests/noPassthrough/server_write_concern_metrics.js
index d9ea528f1c7..88ad7d5b13c 100644
--- a/jstests/noPassthrough/server_write_concern_metrics.js
+++ b/jstests/noPassthrough/server_write_concern_metrics.js
@@ -1,213 +1,212 @@
// Tests writeConcern metrics in the serverStatus output.
// @tags: [requires_persistence, requires_journaling, requires_replication]
(function() {
- "use strict";
-
- // Verifies that the server status response has the fields that we expect.
- function verifyServerStatusFields(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("opWriteConcernCounters"),
- "Expected the serverStatus response to have a 'opWriteConcernCounters' field\n" +
- tojson(serverStatusResponse));
- assert(
- serverStatusResponse.opWriteConcernCounters.hasOwnProperty("insert"),
- "The 'opWriteConcernCounters' field in serverStatus did not have the 'insert' field\n" +
- tojson(serverStatusResponse.opWriteConcernCounters));
- assert(
- serverStatusResponse.opWriteConcernCounters.hasOwnProperty("update"),
- "The 'opWriteConcernCounters' field in serverStatus did not have the 'update' field\n" +
- tojson(serverStatusResponse.opWriteConcernCounters));
- assert(
- serverStatusResponse.opWriteConcernCounters.hasOwnProperty("delete"),
- "The 'opWriteConcernCounters' field in serverStatus did not have the 'delete' field\n" +
- tojson(serverStatusResponse.opWriteConcernCounters));
+"use strict";
+
+// Verifies that the server status response has the fields that we expect.
+function verifyServerStatusFields(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("opWriteConcernCounters"),
+ "Expected the serverStatus response to have a 'opWriteConcernCounters' field\n" +
+ tojson(serverStatusResponse));
+ assert(serverStatusResponse.opWriteConcernCounters.hasOwnProperty("insert"),
+ "The 'opWriteConcernCounters' field in serverStatus did not have the 'insert' field\n" +
+ tojson(serverStatusResponse.opWriteConcernCounters));
+ assert(serverStatusResponse.opWriteConcernCounters.hasOwnProperty("update"),
+ "The 'opWriteConcernCounters' field in serverStatus did not have the 'update' field\n" +
+ tojson(serverStatusResponse.opWriteConcernCounters));
+ assert(serverStatusResponse.opWriteConcernCounters.hasOwnProperty("delete"),
+ "The 'opWriteConcernCounters' field in serverStatus did not have the 'delete' field\n" +
+ tojson(serverStatusResponse.opWriteConcernCounters));
+}
+
+// Verifies that the given path of the server status response is incremented in the way we
+// expect, and no other changes occurred. This function modifies its inputs.
+function verifyServerStatusChange(initialStats, newStats, path, expectedIncrement) {
+ // Traverse to the parent of the changed element.
+ let pathComponents = path.split(".");
+ let initialParent = initialStats;
+ let newParent = newStats;
+ for (let i = 0; i < pathComponents.length - 1; i++) {
+ assert(initialParent.hasOwnProperty(pathComponents[i]),
+ "initialStats did not contain component " + i + " of path " + path +
+ ", initialStats: " + tojson(initialStats));
+ initialParent = initialParent[pathComponents[i]];
+
+ assert(newParent.hasOwnProperty(pathComponents[i]),
+ "newStats did not contain component " + i + " of path " + path +
+ ", newStats: " + tojson(newStats));
+ newParent = newParent[pathComponents[i]];
}
- // Verifies that the given path of the server status response is incremented in the way we
- // expect, and no other changes occurred. This function modifies its inputs.
- function verifyServerStatusChange(initialStats, newStats, path, expectedIncrement) {
- // Traverse to the parent of the changed element.
- let pathComponents = path.split(".");
- let initialParent = initialStats;
- let newParent = newStats;
- for (let i = 0; i < pathComponents.length - 1; i++) {
- assert(initialParent.hasOwnProperty(pathComponents[i]),
- "initialStats did not contain component " + i + " of path " + path +
- ", initialStats: " + tojson(initialStats));
- initialParent = initialParent[pathComponents[i]];
-
- assert(newParent.hasOwnProperty(pathComponents[i]),
- "newStats did not contain component " + i + " of path " + path + ", newStats: " +
- tojson(newStats));
- newParent = newParent[pathComponents[i]];
- }
-
- // Test the expected increment of the changed element. The element may not exist in the
- // initial stats, in which case it is treated as 0.
- let lastPathComponent = pathComponents[pathComponents.length - 1];
- let initialValue = 0;
- if (initialParent.hasOwnProperty(lastPathComponent)) {
- initialValue = initialParent[lastPathComponent];
- }
- assert(newParent.hasOwnProperty(lastPathComponent),
- "newStats did not contain last component of path " + path + ", newStats: " +
- tojson(newStats));
- assert.eq(initialValue + expectedIncrement,
- newParent[lastPathComponent],
- "expected " + path + " to increase by " + expectedIncrement + ", initialStats: " +
- tojson(initialStats) + ", newStats: " + tojson(newStats));
-
- // Delete the changed element.
- delete initialParent[lastPathComponent];
- delete newParent[lastPathComponent];
-
- // The stats objects should be equal without the changed element.
- assert.eq(0,
- bsonWoCompare(initialStats, newStats),
- "expected initialStats and newStats to be equal after removing " + path +
- ", initialStats: " + tojson(initialStats) + ", newStats: " +
- tojson(newStats));
+ // Test the expected increment of the changed element. The element may not exist in the
+ // initial stats, in which case it is treated as 0.
+ let lastPathComponent = pathComponents[pathComponents.length - 1];
+ let initialValue = 0;
+ if (initialParent.hasOwnProperty(lastPathComponent)) {
+ initialValue = initialParent[lastPathComponent];
}
-
- const rst = new ReplSetTest(
- {nodes: 2, nodeOptions: {setParameter: 'reportOpWriteConcernCountersInServerStatus=true'}});
- rst.startSet();
- let config = rst.getReplSetConfig();
- config.members[1].priority = 0;
- config.members[0].tags = {dc_va: "rack1"};
- config.settings = {getLastErrorModes: {myTag: {dc_va: 1}}};
- rst.initiate(config);
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const dbName = "test";
- const collName = "server_write_concern_metrics";
- const testDB = primary.getDB(dbName);
- const testColl = testDB[collName];
-
- function resetCollection() {
- testColl.drop();
- assert.commandWorked(testDB.createCollection(collName));
- }
-
- function testWriteConcernMetrics(cmd, opName, inc) {
- // Run command with no writeConcern.
- resetCollection();
- let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(testDB.runCommand(cmd));
- let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".none",
- inc);
-
- // Run command with writeConcern {j: true}. This should be counted as having no 'w' value.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(
- testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {j: true}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".none",
- inc);
-
- // Run command with writeConcern {w: "majority"}.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(testDB.runCommand(
- Object.assign(Object.assign({}, cmd), {writeConcern: {w: "majority"}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".wmajority",
- inc);
-
- // Run command with writeConcern {w: 0}.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(
- testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 0}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".wnum.0",
- inc);
-
- // Run command with writeConcern {w: 1}.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(
- testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 1}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".wnum.1",
- inc);
-
- // Run command with writeConcern {w: 2}.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(
- testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 2}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".wnum.2",
- inc);
-
- // Run command with writeConcern {w: "myTag"}.
- resetCollection();
- serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(
- testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: "myTag"}})));
- newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(serverStatus.opWriteConcernCounters,
- newStatus.opWriteConcernCounters,
- opName + ".wtag.myTag",
- inc);
-
- // writeConcern metrics are not tracked on the secondary.
- resetCollection();
- serverStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
- verifyServerStatusFields(serverStatus);
- assert.commandWorked(testDB.runCommand(cmd));
- newStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
- assert.eq(
- 0,
- bsonWoCompare(serverStatus.opWriteConcernCounters, newStatus.opWriteConcernCounters),
- "expected no change in secondary writeConcern metrics, before: " +
- tojson(serverStatus) + ", after: " + tojson(newStatus));
- }
-
- // Test single insert/update/delete.
- testWriteConcernMetrics({insert: collName, documents: [{}]}, "insert", 1);
- testWriteConcernMetrics({update: collName, updates: [{q: {}, u: {$set: {a: 1}}}]}, "update", 1);
- testWriteConcernMetrics({delete: collName, deletes: [{q: {}, limit: 1}]}, "delete", 1);
-
- // Test batch writes.
- testWriteConcernMetrics({insert: collName, documents: [{}, {}]}, "insert", 2);
- testWriteConcernMetrics(
- {update: collName, updates: [{q: {}, u: {$set: {a: 1}}}, {q: {}, u: {$set: {a: 1}}}]},
- "update",
- 2);
- testWriteConcernMetrics(
- {delete: collName, deletes: [{q: {}, limit: 1}, {q: {}, limit: 1}]}, "delete", 2);
-
- // Test applyOps.
- testWriteConcernMetrics(
- {applyOps: [{op: "i", ns: testColl.getFullName(), o: {_id: 0}}]}, "insert", 1);
- testWriteConcernMetrics(
- {applyOps: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 1}}}]},
- "update",
- 1);
- testWriteConcernMetrics(
- {applyOps: [{op: "d", ns: testColl.getFullName(), o: {_id: 0}}]}, "delete", 1);
-
- rst.stopSet();
+ assert(newParent.hasOwnProperty(lastPathComponent),
+ "newStats did not contain last component of path " + path +
+ ", newStats: " + tojson(newStats));
+ assert.eq(initialValue + expectedIncrement,
+ newParent[lastPathComponent],
+ "expected " + path + " to increase by " + expectedIncrement +
+ ", initialStats: " + tojson(initialStats) + ", newStats: " + tojson(newStats));
+
+ // Delete the changed element.
+ delete initialParent[lastPathComponent];
+ delete newParent[lastPathComponent];
+
+ // The stats objects should be equal without the changed element.
+ assert.eq(0,
+ bsonWoCompare(initialStats, newStats),
+ "expected initialStats and newStats to be equal after removing " + path +
+ ", initialStats: " + tojson(initialStats) + ", newStats: " + tojson(newStats));
+}
+
+const rst = new ReplSetTest(
+ {nodes: 2, nodeOptions: {setParameter: 'reportOpWriteConcernCountersInServerStatus=true'}});
+rst.startSet();
+let config = rst.getReplSetConfig();
+config.members[1].priority = 0;
+config.members[0].tags = {
+ dc_va: "rack1"
+};
+config.settings = {
+ getLastErrorModes: {myTag: {dc_va: 1}}
+};
+rst.initiate(config);
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const dbName = "test";
+const collName = "server_write_concern_metrics";
+const testDB = primary.getDB(dbName);
+const testColl = testDB[collName];
+
+function resetCollection() {
+ testColl.drop();
+ assert.commandWorked(testDB.createCollection(collName));
+}
+
+function testWriteConcernMetrics(cmd, opName, inc) {
+ // Run command with no writeConcern.
+ resetCollection();
+ let serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(testDB.runCommand(cmd));
+ let newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".none",
+ inc);
+
+ // Run command with writeConcern {j: true}. This should be counted as having no 'w' value.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {j: true}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".none",
+ inc);
+
+ // Run command with writeConcern {w: "majority"}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: "majority"}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wmajority",
+ inc);
+
+ // Run command with writeConcern {w: 0}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 0}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wnum.0",
+ inc);
+
+ // Run command with writeConcern {w: 1}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 1}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wnum.1",
+ inc);
+
+ // Run command with writeConcern {w: 2}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: 2}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wnum.2",
+ inc);
+
+ // Run command with writeConcern {w: "myTag"}.
+ resetCollection();
+ serverStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(
+ testDB.runCommand(Object.assign(Object.assign({}, cmd), {writeConcern: {w: "myTag"}})));
+ newStatus = assert.commandWorked(testDB.adminCommand({serverStatus: 1}));
+ verifyServerStatusChange(serverStatus.opWriteConcernCounters,
+ newStatus.opWriteConcernCounters,
+ opName + ".wtag.myTag",
+ inc);
+
+ // writeConcern metrics are not tracked on the secondary.
+ resetCollection();
+ serverStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
+ verifyServerStatusFields(serverStatus);
+ assert.commandWorked(testDB.runCommand(cmd));
+ newStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
+ assert.eq(0,
+ bsonWoCompare(serverStatus.opWriteConcernCounters, newStatus.opWriteConcernCounters),
+ "expected no change in secondary writeConcern metrics, before: " +
+ tojson(serverStatus) + ", after: " + tojson(newStatus));
+}
+
+// Test single insert/update/delete.
+testWriteConcernMetrics({insert: collName, documents: [{}]}, "insert", 1);
+testWriteConcernMetrics({update: collName, updates: [{q: {}, u: {$set: {a: 1}}}]}, "update", 1);
+testWriteConcernMetrics({delete: collName, deletes: [{q: {}, limit: 1}]}, "delete", 1);
+
+// Test batch writes.
+testWriteConcernMetrics({insert: collName, documents: [{}, {}]}, "insert", 2);
+testWriteConcernMetrics(
+ {update: collName, updates: [{q: {}, u: {$set: {a: 1}}}, {q: {}, u: {$set: {a: 1}}}]},
+ "update",
+ 2);
+testWriteConcernMetrics(
+ {delete: collName, deletes: [{q: {}, limit: 1}, {q: {}, limit: 1}]}, "delete", 2);
+
+// Test applyOps.
+testWriteConcernMetrics(
+ {applyOps: [{op: "i", ns: testColl.getFullName(), o: {_id: 0}}]}, "insert", 1);
+testWriteConcernMetrics(
+ {applyOps: [{op: "u", ns: testColl.getFullName(), o2: {_id: 0}, o: {$set: {a: 1}}}]},
+ "update",
+ 1);
+testWriteConcernMetrics(
+ {applyOps: [{op: "d", ns: testColl.getFullName(), o: {_id: 0}}]}, "delete", 1);
+
+rst.stopSet();
}());
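The counters exercised above live under the opWriteConcernCounters section of serverStatus (reported only when reportOpWriteConcernCountersInServerStatus is enabled, as in the test) and are keyed by operation type and then by the write concern used: writes without a 'w' value (including {j: true} alone) land under <op>.none, {w: "majority"} under <op>.wmajority, a numeric w under <op>.wnum.<n>, and a tag under <op>.wtag.<tag>. A minimal sketch of reading one such counter, assuming testDB is connected to the primary as above; a counter that has never been bumped may be absent, hence the fallback to 0:

// Run an insert with w: "majority" and confirm that insert.wmajority increased by one.
const before = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).opWriteConcernCounters;
assert.commandWorked(testDB.runCommand(
    {insert: "server_write_concern_metrics", documents: [{}], writeConcern: {w: "majority"}}));
const after = assert.commandWorked(testDB.adminCommand({serverStatus: 1})).opWriteConcernCounters;
assert.eq((before.insert.wmajority || 0) + 1, after.insert.wmajority);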
diff --git a/jstests/noPassthrough/session_w0.js b/jstests/noPassthrough/session_w0.js
index dd219581f43..5f6f29c0ec8 100644
--- a/jstests/noPassthrough/session_w0.js
+++ b/jstests/noPassthrough/session_w0.js
@@ -2,19 +2,18 @@
* Explicit shell session should prohibit w: 0 writes.
*/
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
- const session = conn.startSession();
- const sessionColl = session.getDatabase("test").getCollection("foo");
- const err = assert.throws(() => {
- sessionColl.insert({x: 1}, {writeConcern: {w: 0}});
- });
+const conn = MongoRunner.runMongod();
+const session = conn.startSession();
+const sessionColl = session.getDatabase("test").getCollection("foo");
+const err = assert.throws(() => {
+ sessionColl.insert({x: 1}, {writeConcern: {w: 0}});
+});
- assert.includes(err.toString(),
- "Unacknowledged writes are prohibited with sessions",
- "wrong error message");
+assert.includes(
+ err.toString(), "Unacknowledged writes are prohibited with sessions", "wrong error message");
- session.endSession();
- MongoRunner.stopMongod(conn);
+session.endSession();
+MongoRunner.stopMongod(conn);
})();
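For contrast with the rejected {w: 0} write above, the same insert is accepted on an explicit session once an acknowledged write concern is used. A minimal sketch, reusing sessionColl from the test:

// Acknowledged writes (here w: 1) are allowed on an explicit session.
assert.writeOK(sessionColl.insert({x: 1}, {writeConcern: {w: 1}}));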
diff --git a/jstests/noPassthrough/sessions_collection_auto_healing.js b/jstests/noPassthrough/sessions_collection_auto_healing.js
index f12ed2fb91b..8f1851aa408 100644
--- a/jstests/noPassthrough/sessions_collection_auto_healing.js
+++ b/jstests/noPassthrough/sessions_collection_auto_healing.js
@@ -1,60 +1,59 @@
load('jstests/libs/sessions_collection.js');
(function() {
- "use strict";
-
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
-
- let timeoutMinutes = 5;
-
- var startSession = {startSession: 1};
- var conn = MongoRunner.runMongod(
- {setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes});
-
- var admin = conn.getDB("admin");
- var config = conn.getDB("config");
-
- // Test that we can use sessions before the sessions collection exists.
- {
- validateSessionsCollection(conn, false, false, timeoutMinutes);
- assert.commandWorked(admin.runCommand({startSession: 1}));
- validateSessionsCollection(conn, false, false, timeoutMinutes);
- }
-
- // Test that a refresh will create the sessions collection.
- {
- assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(conn, true, true, timeoutMinutes);
- }
-
- // Test that a refresh will (re)create the TTL index on the sessions collection.
- {
- assert.commandWorked(config.system.sessions.dropIndex({lastUse: 1}));
- validateSessionsCollection(conn, true, false, timeoutMinutes);
- assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(conn, true, true, timeoutMinutes);
- }
-
- MongoRunner.stopMongod(conn);
-
- timeoutMinutes = 4;
- conn = MongoRunner.runMongod({
- restart: conn,
- cleanData: false,
- setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes
- });
- admin = conn.getDB("admin");
- config = conn.getDB("config");
-
- // Test that a change to the TTL index expiration on restart will generate a collMod to change
- // the expiration time.
- {
- assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(conn, true, true, timeoutMinutes);
- }
-
- MongoRunner.stopMongod(conn);
-
+"use strict";
+
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
+
+let timeoutMinutes = 5;
+
+var startSession = {startSession: 1};
+var conn =
+ MongoRunner.runMongod({setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes});
+
+var admin = conn.getDB("admin");
+var config = conn.getDB("config");
+
+// Test that we can use sessions before the sessions collection exists.
+{
+ validateSessionsCollection(conn, false, false, timeoutMinutes);
+ assert.commandWorked(admin.runCommand({startSession: 1}));
+ validateSessionsCollection(conn, false, false, timeoutMinutes);
+}
+
+// Test that a refresh will create the sessions collection.
+{
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(conn, true, true, timeoutMinutes);
+}
+
+// Test that a refresh will (re)create the TTL index on the sessions collection.
+{
+ assert.commandWorked(config.system.sessions.dropIndex({lastUse: 1}));
+ validateSessionsCollection(conn, true, false, timeoutMinutes);
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(conn, true, true, timeoutMinutes);
+}
+
+MongoRunner.stopMongod(conn);
+
+timeoutMinutes = 4;
+conn = MongoRunner.runMongod({
+ restart: conn,
+ cleanData: false,
+ setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes
+});
+admin = conn.getDB("admin");
+config = conn.getDB("config");
+
+// Test that a change to the TTL index expiration on restart will generate a collMod to change
+// the expiration time.
+{
+ assert.commandWorked(admin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ validateSessionsCollection(conn, true, true, timeoutMinutes);
+}
+
+MongoRunner.stopMongod(conn);
})();
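The auto-healing above hinges on refreshLogicalSessionCacheNow, which creates config.system.sessions and its TTL index on {lastUse: 1} whenever either is missing. A minimal sketch of checking that state by hand, assuming a standalone connection conn as in the test:

// Force a logical session cache refresh, then confirm the sessions collection and its TTL index.
assert.commandWorked(conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
assert(conn.getDB("config").getCollectionNames().includes("system.sessions"),
       "expected config.system.sessions to exist after the refresh");
const ttlIndexes = conn.getDB("config").system.sessions.getIndexes().filter(
    idx => idx.key.hasOwnProperty("lastUse"));
assert.eq(1, ttlIndexes.length, "expected a TTL index keyed on lastUse");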
diff --git a/jstests/noPassthrough/set_step_params.js b/jstests/noPassthrough/set_step_params.js
index 08a4e36422b..d3fbe5deb02 100644
--- a/jstests/noPassthrough/set_step_params.js
+++ b/jstests/noPassthrough/set_step_params.js
@@ -5,270 +5,270 @@ load("jstests/libs/parallelTester.js");
*/
(function() {
- "use strict";
-
- const kDbName = 'test';
-
- const minConns = 4;
- var stepParams = {
- ShardingTaskExecutorPoolMinSize: minConns,
- ShardingTaskExecutorPoolMaxSize: 10,
- ShardingTaskExecutorPoolMaxConnecting: 5,
- ShardingTaskExecutorPoolHostTimeoutMS: 300000,
- ShardingTaskExecutorPoolRefreshRequirementMS: 60000,
- ShardingTaskExecutorPoolRefreshTimeoutMS: 20000,
- ShardingTaskExecutorPoolReplicaSetMatching: "disabled",
- };
-
- const st = new ShardingTest({
- config: {nodes: 1},
- shards: 1,
- rs0: {nodes: 1},
- mongos: [{setParameter: stepParams}],
+"use strict";
+
+const kDbName = 'test';
+
+const minConns = 4;
+var stepParams = {
+ ShardingTaskExecutorPoolMinSize: minConns,
+ ShardingTaskExecutorPoolMaxSize: 10,
+ ShardingTaskExecutorPoolMaxConnecting: 5,
+ ShardingTaskExecutorPoolHostTimeoutMS: 300000,
+ ShardingTaskExecutorPoolRefreshRequirementMS: 60000,
+ ShardingTaskExecutorPoolRefreshTimeoutMS: 20000,
+ ShardingTaskExecutorPoolReplicaSetMatching: "disabled",
+};
+
+const st = new ShardingTest({
+ config: {nodes: 1},
+ shards: 1,
+ rs0: {nodes: 1},
+ mongos: [{setParameter: stepParams}],
+});
+const mongos = st.s0;
+const rst = st.rs0;
+const primary = rst.getPrimary();
+
+const cfg = primary.getDB('local').system.replset.findOne();
+const allHosts = cfg.members.map(x => x.host);
+const mongosDB = mongos.getDB(kDbName);
+const primaryOnly = [primary.name];
+
+function configureReplSetFailpoint(name, modeValue) {
+ st.rs0.nodes.forEach(function(node) {
+ assert.commandWorked(node.getDB("admin").runCommand({
+ configureFailPoint: name,
+ mode: modeValue,
+ data: {shouldCheckForInterrupt: true},
+ }));
});
- const mongos = st.s0;
- const rst = st.rs0;
- const primary = rst.getPrimary();
-
- const cfg = primary.getDB('local').system.replset.findOne();
- const allHosts = cfg.members.map(x => x.host);
- const mongosDB = mongos.getDB(kDbName);
- const primaryOnly = [primary.name];
-
- function configureReplSetFailpoint(name, modeValue) {
- st.rs0.nodes.forEach(function(node) {
- assert.commandWorked(node.getDB("admin").runCommand({
- configureFailPoint: name,
- mode: modeValue,
- data: {shouldCheckForInterrupt: true},
- }));
- });
+}
+
+var threads = [];
+function launchFinds({times, readPref, shouldFail}) {
+ jsTestLog("Starting " + times + " connections");
+ for (var i = 0; i < times; i++) {
+ var thread = new Thread(function(connStr, readPref, dbName, shouldFail) {
+ var client = new Mongo(connStr);
+ const ret = client.getDB(dbName).runCommand(
+ {find: "test", limit: 1, "$readPreference": {mode: readPref}});
+
+ if (shouldFail) {
+ assert.commandFailed(ret);
+ } else {
+ assert.commandWorked(ret);
+ }
+ }, st.s.host, readPref, kDbName, shouldFail);
+ thread.start();
+ threads.push(thread);
}
-
- var threads = [];
- function launchFinds({times, readPref, shouldFail}) {
- jsTestLog("Starting " + times + " connections");
- for (var i = 0; i < times; i++) {
- var thread = new Thread(function(connStr, readPref, dbName, shouldFail) {
- var client = new Mongo(connStr);
- const ret = client.getDB(dbName).runCommand(
- {find: "test", limit: 1, "$readPreference": {mode: readPref}});
-
- if (shouldFail) {
- assert.commandFailed(ret);
- } else {
- assert.commandWorked(ret);
- }
- }, st.s.host, readPref, kDbName, shouldFail);
- thread.start();
- threads.push(thread);
+}
+
+var currentCheckNum = 0;
+function hasConnPoolStats(args) {
+ const checkNum = currentCheckNum++;
+ jsTestLog("Check #" + checkNum + ": " + tojson(args));
+ var {ready, pending, active, hosts, isAbsent} = args;
+
+ ready = ready ? ready : 0;
+ pending = pending ? pending : 0;
+ active = active ? active : 0;
+ hosts = hosts ? hosts : allHosts;
+
+ function checkStats(res, host) {
+ var stats = res.hosts[host];
+ if (!stats) {
+ jsTestLog("Connection stats for " + host + " are absent");
+ return isAbsent;
}
+
+ jsTestLog("Connection stats for " + host + ": " + tojson(stats));
+ return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
}
- var currentCheckNum = 0;
- function hasConnPoolStats(args) {
- const checkNum = currentCheckNum++;
- jsTestLog("Check #" + checkNum + ": " + tojson(args));
- var {ready, pending, active, hosts, isAbsent} = args;
-
- ready = ready ? ready : 0;
- pending = pending ? pending : 0;
- active = active ? active : 0;
- hosts = hosts ? hosts : allHosts;
-
- function checkStats(res, host) {
- var stats = res.hosts[host];
- if (!stats) {
- jsTestLog("Connection stats for " + host + " are absent");
- return isAbsent;
- }
+ function checkAllStats() {
+ var res = mongos.adminCommand({connPoolStats: 1});
+ return hosts.map(host => checkStats(res, host)).every(x => x);
+ }
- jsTestLog("Connection stats for " + host + ": " + tojson(stats));
- return stats.available == ready && stats.refreshing == pending && stats.inUse == active;
- }
+ assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000);
- function checkAllStats() {
- var res = mongos.adminCommand({connPoolStats: 1});
- return hosts.map(host => checkStats(res, host)).every(x => x);
- }
+ jsTestLog("Check #" + checkNum + " successful");
+}
- assert.soon(checkAllStats, "Check #" + checkNum + " failed", 10000);
+function updateSetParameters(params) {
+ var cmd = Object.assign({"setParameter": 1}, params);
+ assert.commandWorked(mongos.adminCommand(cmd));
+}
- jsTestLog("Check #" + checkNum + " successful");
- }
+function dropConnections() {
+ assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
+}
- function updateSetParameters(params) {
- var cmd = Object.assign({"setParameter": 1}, params);
- assert.commandWorked(mongos.adminCommand(cmd));
- }
+function resetPools() {
+ dropConnections();
+ mongos.adminCommand({multicast: {ping: 0}});
+ hasConnPoolStats({ready: 4});
+}
- function dropConnections() {
- assert.commandWorked(mongos.adminCommand({dropConnections: 1, hostAndPort: allHosts}));
- }
+function runSubTest(name, fun) {
+ jsTestLog("Running test for " + name);
- function resetPools() {
- dropConnections();
- mongos.adminCommand({multicast: {ping: 0}});
- hasConnPoolStats({ready: 4});
- }
+ resetPools();
- function runSubTest(name, fun) {
- jsTestLog("Running test for " + name);
+ fun();
- resetPools();
+ updateSetParameters(stepParams);
+}
- fun();
+assert.writeOK(mongosDB.test.insert({x: 1}));
+assert.writeOK(mongosDB.test.insert({x: 2}));
+assert.writeOK(mongosDB.test.insert({x: 3}));
+st.rs0.awaitReplication();
- updateSetParameters(stepParams);
- }
+runSubTest("MinSize", function() {
+ dropConnections();
- assert.writeOK(mongosDB.test.insert({x: 1}));
- assert.writeOK(mongosDB.test.insert({x: 2}));
- assert.writeOK(mongosDB.test.insert({x: 3}));
- st.rs0.awaitReplication();
+    // Launch an initial find to trigger the pool to reach the min
+ launchFinds({times: 1, readPref: "primary"});
+ hasConnPoolStats({ready: minConns});
- runSubTest("MinSize", function() {
- dropConnections();
+ // Increase by one
+ updateSetParameters({ShardingTaskExecutorPoolMinSize: 5});
+ hasConnPoolStats({ready: 5});
-        // Launch an initial find to trigger the pool to reach the min
- launchFinds({times: 1, readPref: "primary"});
- hasConnPoolStats({ready: minConns});
+ // Increase to MaxSize
+ updateSetParameters({ShardingTaskExecutorPoolMinSize: 10});
+ hasConnPoolStats({ready: 10});
- // Increase by one
- updateSetParameters({ShardingTaskExecutorPoolMinSize: 5});
- hasConnPoolStats({ready: 5});
+ // Decrease to zero
+ updateSetParameters({ShardingTaskExecutorPoolMinSize: 0});
+});
- // Increase to MaxSize
- updateSetParameters({ShardingTaskExecutorPoolMinSize: 10});
- hasConnPoolStats({ready: 10});
+runSubTest("MaxSize", function() {
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ dropConnections();
- // Decrease to zero
- updateSetParameters({ShardingTaskExecutorPoolMinSize: 0});
- });
+ // Launch 10 blocked finds
+ launchFinds({times: 10, readPref: "primary"});
+ hasConnPoolStats({active: 10, hosts: primaryOnly});
- runSubTest("MaxSize", function() {
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- dropConnections();
+ // Increase by 5 and Launch another 4 blocked finds
+ updateSetParameters({ShardingTaskExecutorPoolMaxSize: 15});
+ launchFinds({times: 4, readPref: "primary"});
+ hasConnPoolStats({active: 14, hosts: primaryOnly});
- // Launch 10 blocked finds
- launchFinds({times: 10, readPref: "primary"});
- hasConnPoolStats({active: 10, hosts: primaryOnly});
+ // Launch yet another 2, these should add only 1 connection
+ launchFinds({times: 2, readPref: "primary"});
+ hasConnPoolStats({active: 15, hosts: primaryOnly});
- // Increase by 5 and Launch another 4 blocked finds
- updateSetParameters({ShardingTaskExecutorPoolMaxSize: 15});
- launchFinds({times: 4, readPref: "primary"});
- hasConnPoolStats({active: 14, hosts: primaryOnly});
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ hasConnPoolStats({ready: 15, pending: 0, hosts: primaryOnly});
+});
- // Launch yet another 2, these should add only 1 connection
- launchFinds({times: 2, readPref: "primary"});
- hasConnPoolStats({active: 15, hosts: primaryOnly});
+// Test maxConnecting
+runSubTest("MaxConnecting", function() {
+ const maxPending1 = 2;
+ const maxPending2 = 4;
+ const conns = 6;
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
- hasConnPoolStats({ready: 15, pending: 0, hosts: primaryOnly});
+ updateSetParameters({
+ ShardingTaskExecutorPoolMaxSize: 100,
+ ShardingTaskExecutorPoolMaxConnecting: maxPending1,
});
- // Test maxConnecting
- runSubTest("MaxConnecting", function() {
- const maxPending1 = 2;
- const maxPending2 = 4;
- const conns = 6;
-
- updateSetParameters({
- ShardingTaskExecutorPoolMaxSize: 100,
- ShardingTaskExecutorPoolMaxConnecting: maxPending1,
- });
-
- configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- dropConnections();
-
- // Go to the limit of maxConnecting, so we're stuck here
- launchFinds({times: maxPending1, readPref: "primary"});
- hasConnPoolStats({pending: maxPending1});
-
- // More won't run right now
- launchFinds({times: conns - maxPending1, readPref: "primary"});
- hasConnPoolStats({pending: maxPending1});
-
- // If we increase our limit, it should fill in some of the connections
- updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending2});
- hasConnPoolStats({pending: maxPending2});
-
- // Dropping the limit doesn't cause us to drop pending
- updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending1});
- hasConnPoolStats({pending: maxPending2});
-
- // Release our pending and walk away
- configureReplSetFailpoint("waitInIsMaster", "off");
- hasConnPoolStats({active: conns});
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ dropConnections();
+
+ // Go to the limit of maxConnecting, so we're stuck here
+ launchFinds({times: maxPending1, readPref: "primary"});
+ hasConnPoolStats({pending: maxPending1});
+
+ // More won't run right now
+ launchFinds({times: conns - maxPending1, readPref: "primary"});
+ hasConnPoolStats({pending: maxPending1});
+
+ // If we increase our limit, it should fill in some of the connections
+ updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending2});
+ hasConnPoolStats({pending: maxPending2});
+
+ // Dropping the limit doesn't cause us to drop pending
+ updateSetParameters({ShardingTaskExecutorPoolMaxConnecting: maxPending1});
+ hasConnPoolStats({pending: maxPending2});
+
+ // Release our pending and walk away
+ configureReplSetFailpoint("waitInIsMaster", "off");
+ hasConnPoolStats({active: conns});
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+});
+
+runSubTest("Timeouts", function() {
+ const conns = minConns;
+ const pendingTimeoutMS = 5000;
+ const toRefreshTimeoutMS = 1000;
+ const idleTimeoutMS1 = 20000;
+ const idleTimeoutMS2 = 15500;
+
+ // Updating separately since the validation depends on existing params
+ updateSetParameters({
+ ShardingTaskExecutorPoolRefreshTimeoutMS: pendingTimeoutMS,
+ });
+ updateSetParameters({
+ ShardingTaskExecutorPoolRefreshRequirementMS: toRefreshTimeoutMS,
});
+ updateSetParameters({
+ ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS1,
+ });
+
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
+ dropConnections();
+
+ // Make ready connections
+ launchFinds({times: conns, readPref: "primary"});
+ configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
+ hasConnPoolStats({ready: conns});
+
+ // Block refreshes and wait for the toRefresh timeout
+ configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
+ sleep(toRefreshTimeoutMS);
+
+ // Confirm that we're in pending for all of our conns
+ hasConnPoolStats({pending: conns});
- runSubTest("Timeouts", function() {
- const conns = minConns;
- const pendingTimeoutMS = 5000;
- const toRefreshTimeoutMS = 1000;
- const idleTimeoutMS1 = 20000;
- const idleTimeoutMS2 = 15500;
-
- // Updating separately since the validation depends on existing params
- updateSetParameters({
- ShardingTaskExecutorPoolRefreshTimeoutMS: pendingTimeoutMS,
- });
- updateSetParameters({
- ShardingTaskExecutorPoolRefreshRequirementMS: toRefreshTimeoutMS,
- });
- updateSetParameters({
- ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS1,
- });
-
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "alwaysOn");
- dropConnections();
-
- // Make ready connections
- launchFinds({times: conns, readPref: "primary"});
- configureReplSetFailpoint("waitInFindBeforeMakingBatch", "off");
- hasConnPoolStats({ready: conns});
-
- // Block refreshes and wait for the toRefresh timeout
- configureReplSetFailpoint("waitInIsMaster", "alwaysOn");
- sleep(toRefreshTimeoutMS);
-
- // Confirm that we're in pending for all of our conns
- hasConnPoolStats({pending: conns});
-
- // Set our min conns to 0 to make sure we don't refresh after pending timeout
- updateSetParameters({
- ShardingTaskExecutorPoolMinSize: 0,
- });
-
- // Wait for our pending timeout
- sleep(pendingTimeoutMS);
- hasConnPoolStats({});
-
- configureReplSetFailpoint("waitInIsMaster", "off");
-
- // Reset the min conns to make sure normal refresh doesn't extend the timeout
- updateSetParameters({
- ShardingTaskExecutorPoolMinSize: minConns,
- });
-
- // Wait for our host timeout and confirm the pool drops
- sleep(idleTimeoutMS1);
- hasConnPoolStats({isAbsent: true});
-
- // Reset the pool
- resetPools();
-
- // Sleep for a shorter timeout and then update so we're already expired
- sleep(idleTimeoutMS2);
- updateSetParameters({ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS2});
- hasConnPoolStats({isAbsent: true});
+ // Set our min conns to 0 to make sure we don't refresh after pending timeout
+ updateSetParameters({
+ ShardingTaskExecutorPoolMinSize: 0,
});
- threads.forEach(function(thread) {
- thread.join();
+ // Wait for our pending timeout
+ sleep(pendingTimeoutMS);
+ hasConnPoolStats({});
+
+ configureReplSetFailpoint("waitInIsMaster", "off");
+
+ // Reset the min conns to make sure normal refresh doesn't extend the timeout
+ updateSetParameters({
+ ShardingTaskExecutorPoolMinSize: minConns,
});
- st.stop();
+ // Wait for our host timeout and confirm the pool drops
+ sleep(idleTimeoutMS1);
+ hasConnPoolStats({isAbsent: true});
+
+ // Reset the pool
+ resetPools();
+
+ // Sleep for a shorter timeout and then update so we're already expired
+ sleep(idleTimeoutMS2);
+ updateSetParameters({ShardingTaskExecutorPoolHostTimeoutMS: idleTimeoutMS2});
+ hasConnPoolStats({isAbsent: true});
+});
+
+threads.forEach(function(thread) {
+ thread.join();
+});
+
+st.stop();
})();
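The pool-sizing knobs adjusted above are ordinary runtime server parameters on mongos, and the per-host state that hasConnPoolStats() inspects comes from connPoolStats: available corresponds to the test's 'ready', refreshing to 'pending', and inUse to 'active'. A minimal sketch of that round trip, assuming a connection mongos as in the test:

// Raise the minimum pool size at runtime, then read back the per-host pool counters.
assert.commandWorked(mongos.adminCommand({setParameter: 1, ShardingTaskExecutorPoolMinSize: 5}));
const stats = assert.commandWorked(mongos.adminCommand({connPoolStats: 1}));
// Each entry of stats.hosts reports available, refreshing, and inUse connection counts.
Object.keys(stats.hosts).forEach(host => jsTestLog(host + ": " + tojson(stats.hosts[host])));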
diff --git a/jstests/noPassthrough/setshellparameter.js b/jstests/noPassthrough/setshellparameter.js
index 9fd17abb605..deed3dc4076 100644
--- a/jstests/noPassthrough/setshellparameter.js
+++ b/jstests/noPassthrough/setshellparameter.js
@@ -1,22 +1,20 @@
// Test --setShellParameter CLI switch.
(function() {
- 'use strict';
+'use strict';
- function test(ssp, succeed) {
- const result =
- runMongoProgram('./mongo', '--setShellParameter', ssp, '--nodb', '--eval', ';');
- assert.eq(0 == result,
- succeed,
-                  '--setShellParameter ' + ssp + ' worked/didn\'t-work unexpectedly');
- }
+function test(ssp, succeed) {
+ const result = runMongoProgram('./mongo', '--setShellParameter', ssp, '--nodb', '--eval', ';');
+ assert.eq(
+        0 == result, succeed, '--setShellParameter ' + ssp + ' worked/didn\'t-work unexpectedly');
+}
- // Whitelisted
- test('disabledSecureAllocatorDomains=foo', true);
+// Whitelisted
+test('disabledSecureAllocatorDomains=foo', true);
- // Not whitelisted
- test('enableTestCommands=1', false);
+// Not whitelisted
+test('enableTestCommands=1', false);
- // Unknown
- test('theAnswerToTheQuestionOfLifeTheUniverseAndEverything=42', false);
+// Unknown
+test('theAnswerToTheQuestionOfLifeTheUniverseAndEverything=42', false);
})();
diff --git a/jstests/noPassthrough/shard_fixture_selftest.js b/jstests/noPassthrough/shard_fixture_selftest.js
index b4b56ba74d6..dde664865c1 100644
--- a/jstests/noPassthrough/shard_fixture_selftest.js
+++ b/jstests/noPassthrough/shard_fixture_selftest.js
@@ -2,55 +2,55 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
+'use strict';
- load('jstests/concurrency/fsm_libs/shard_fixture.js');
+load('jstests/concurrency/fsm_libs/shard_fixture.js');
- const rsTestOriginal = new ShardingTest({
- shards: 2,
- mongos: 2,
- config: 2,
- shardAsReplicaSet: true,
- });
+const rsTestOriginal = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ config: 2,
+ shardAsReplicaSet: true,
+});
- const rsTestWrapper =
- new FSMShardingTest(`mongodb://${rsTestOriginal.s0.host},${rsTestOriginal.s1.host}`);
+const rsTestWrapper =
+ new FSMShardingTest(`mongodb://${rsTestOriginal.s0.host},${rsTestOriginal.s1.host}`);
- assert.eq(rsTestWrapper.s(0).host, rsTestOriginal.s0.host);
- assert.eq(rsTestWrapper.s(1).host, rsTestOriginal.s1.host);
- assert.eq(rsTestWrapper.s(2), rsTestOriginal.s2); // Both should be undefined.
+assert.eq(rsTestWrapper.s(0).host, rsTestOriginal.s0.host);
+assert.eq(rsTestWrapper.s(1).host, rsTestOriginal.s1.host);
+assert.eq(rsTestWrapper.s(2), rsTestOriginal.s2); // Both should be undefined.
- assert.eq(rsTestWrapper.shard(0).host, rsTestOriginal.shard0.host);
- assert.eq(rsTestWrapper.shard(1).host, rsTestOriginal.shard1.host);
- assert.eq(rsTestWrapper.shard(2), rsTestOriginal.shard2); // Both should be undefined.
+assert.eq(rsTestWrapper.shard(0).host, rsTestOriginal.shard0.host);
+assert.eq(rsTestWrapper.shard(1).host, rsTestOriginal.shard1.host);
+assert.eq(rsTestWrapper.shard(2), rsTestOriginal.shard2); // Both should be undefined.
- assert.eq(rsTestWrapper.rs(0).getURL(), rsTestOriginal.rs0.getURL());
- assert.eq(rsTestWrapper.rs(1).getURL(), rsTestOriginal.rs1.getURL());
- assert.eq(rsTestWrapper.rs(2), rsTestOriginal.rs2); // Both should be undefined.
+assert.eq(rsTestWrapper.rs(0).getURL(), rsTestOriginal.rs0.getURL());
+assert.eq(rsTestWrapper.rs(1).getURL(), rsTestOriginal.rs1.getURL());
+assert.eq(rsTestWrapper.rs(2), rsTestOriginal.rs2); // Both should be undefined.
- assert.eq(rsTestWrapper.d(0), rsTestOriginal.d0); // Both should be undefined.
+assert.eq(rsTestWrapper.d(0), rsTestOriginal.d0); // Both should be undefined.
- assert.eq(rsTestWrapper.c(0).host, rsTestOriginal.c0.host);
- assert.eq(rsTestWrapper.c(1).host, rsTestOriginal.c1.host);
- assert.eq(rsTestWrapper.c(2), rsTestOriginal.c2); // Both should be undefined.
+assert.eq(rsTestWrapper.c(0).host, rsTestOriginal.c0.host);
+assert.eq(rsTestWrapper.c(1).host, rsTestOriginal.c1.host);
+assert.eq(rsTestWrapper.c(2), rsTestOriginal.c2); // Both should be undefined.
- rsTestOriginal.stop();
+rsTestOriginal.stop();
- const dTestOriginal = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- shardAsReplicaSet: false,
- });
+const dTestOriginal = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ shardAsReplicaSet: false,
+});
- const dTestWrapper = new FSMShardingTest(dTestOriginal.s.host);
+const dTestWrapper = new FSMShardingTest(dTestOriginal.s.host);
- assert.eq(dTestWrapper.shard(0).host, dTestOriginal.shard0.host);
- assert.eq(dTestWrapper.s(0).host, dTestOriginal.s0.host);
- assert.eq(dTestWrapper.d(0).host, dTestOriginal.d0.host);
- assert.eq(dTestWrapper.c(0).host, dTestOriginal.c0.host);
+assert.eq(dTestWrapper.shard(0).host, dTestOriginal.shard0.host);
+assert.eq(dTestWrapper.s(0).host, dTestOriginal.s0.host);
+assert.eq(dTestWrapper.d(0).host, dTestOriginal.d0.host);
+assert.eq(dTestWrapper.c(0).host, dTestOriginal.c0.host);
- assert.eq(dTestWrapper.rs(0), dTestOriginal.rs0); // Both should be undefined.
+assert.eq(dTestWrapper.rs(0), dTestOriginal.rs0); // Both should be undefined.
- dTestOriginal.stop();
+dTestOriginal.stop();
})();
\ No newline at end of file
diff --git a/jstests/noPassthrough/shell_appname_uri.js b/jstests/noPassthrough/shell_appname_uri.js
index e7c43164c11..c3a087c1a5b 100644
--- a/jstests/noPassthrough/shell_appname_uri.js
+++ b/jstests/noPassthrough/shell_appname_uri.js
@@ -1,77 +1,77 @@
// @tags: [requires_profiling]
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
- const uri = "mongodb://" + conn.host + "/test";
- const tests = [];
+const conn = MongoRunner.runMongod();
+const uri = "mongodb://" + conn.host + "/test";
+const tests = [];
- // Asserts that system.profile contains only entries
- // with application.name = appname (or undefined)
- function assertProfileOnlyContainsAppName(db, appname) {
- const res = db.system.profile.distinct("appName");
- assert(res.length > 0, "system.profile does not contain any docs");
- if (res.length > 1 || res.indexOf(appname) === -1) {
- // Dump collection.
- print("dumping db.system.profile");
- db.system.profile.find().forEach((doc) => printjsononeline(doc));
- doassert(`system.profile expected to only have appName=${appname}` +
- ` but found ${tojson(res)}`);
- }
+// Asserts that system.profile contains only entries
+// with application.name = appname (or undefined)
+function assertProfileOnlyContainsAppName(db, appname) {
+ const res = db.system.profile.distinct("appName");
+ assert(res.length > 0, "system.profile does not contain any docs");
+ if (res.length > 1 || res.indexOf(appname) === -1) {
+ // Dump collection.
+ print("dumping db.system.profile");
+ db.system.profile.find().forEach((doc) => printjsononeline(doc));
+ doassert(`system.profile expected to only have appName=${appname}` +
+ ` but found ${tojson(res)}`);
}
+}
- tests.push(function testDefaultAppName() {
- const db = new Mongo(uri).getDB("test");
- assert.commandWorked(db.coll.insert({}));
- assertProfileOnlyContainsAppName(db, "MongoDB Shell");
- });
+tests.push(function testDefaultAppName() {
+ const db = new Mongo(uri).getDB("test");
+ assert.commandWorked(db.coll.insert({}));
+ assertProfileOnlyContainsAppName(db, "MongoDB Shell");
+});
- tests.push(function testAppName() {
- const db = new Mongo(uri + "?appName=TestAppName").getDB("test");
- assert.commandWorked(db.coll.insert({}));
- assertProfileOnlyContainsAppName(db, "TestAppName");
- });
+tests.push(function testAppName() {
+ const db = new Mongo(uri + "?appName=TestAppName").getDB("test");
+ assert.commandWorked(db.coll.insert({}));
+ assertProfileOnlyContainsAppName(db, "TestAppName");
+});
- tests.push(function testMultiWordAppName() {
- const db = new Mongo(uri + "?appName=Test%20App%20Name").getDB("test");
- assert.commandWorked(db.coll.insert({}));
- assertProfileOnlyContainsAppName(db, "Test App Name");
- });
+tests.push(function testMultiWordAppName() {
+ const db = new Mongo(uri + "?appName=Test%20App%20Name").getDB("test");
+ assert.commandWorked(db.coll.insert({}));
+ assertProfileOnlyContainsAppName(db, "Test App Name");
+});
- tests.push(function testLongAppName() {
- // From MongoDB Handshake specification:
- // The client.application.name cannot exceed 128 bytes. MongoDB will return an error if
- // these limits are not adhered to, which will result in handshake failure. Drivers MUST
- // validate these values and truncate driver provided values if necessary.
- const longAppName = "a".repeat(129);
- assert.throws(() => new Mongo(uri + "?appName=" + longAppName));
+tests.push(function testLongAppName() {
+ // From MongoDB Handshake specification:
+ // The client.application.name cannot exceed 128 bytes. MongoDB will return an error if
+ // these limits are not adhered to, which will result in handshake failure. Drivers MUST
+ // validate these values and truncate driver provided values if necessary.
+ const longAppName = "a".repeat(129);
+ assert.throws(() => new Mongo(uri + "?appName=" + longAppName));
- // But a 128 character appname should connect without issue.
- const notTooLongAppName = "a".repeat(128);
- const db = new Mongo(uri + "?appName=" + notTooLongAppName).getDB("test");
- assert.commandWorked(db.coll.insert({}));
- assertProfileOnlyContainsAppName(db, notTooLongAppName);
- });
+ // But a 128 character appname should connect without issue.
+ const notTooLongAppName = "a".repeat(128);
+ const db = new Mongo(uri + "?appName=" + notTooLongAppName).getDB("test");
+ assert.commandWorked(db.coll.insert({}));
+ assertProfileOnlyContainsAppName(db, notTooLongAppName);
+});
- tests.push(function testLongAppNameWithMultiByteUTF8() {
- // Each epsilon character is two bytes in UTF-8.
- const longAppName = "\u0190".repeat(65);
- assert.throws(() => new Mongo(uri + "?appName=" + longAppName));
+tests.push(function testLongAppNameWithMultiByteUTF8() {
+ // Each epsilon character is two bytes in UTF-8.
+ const longAppName = "\u0190".repeat(65);
+ assert.throws(() => new Mongo(uri + "?appName=" + longAppName));
- // But a 128 character appname should connect without issue.
- const notTooLongAppName = "\u0190".repeat(64);
- const db = new Mongo(uri + "?appName=" + notTooLongAppName).getDB("test");
- assert.commandWorked(db.coll.insert({}));
- assertProfileOnlyContainsAppName(db, notTooLongAppName);
- });
+ // But a 128 character appname should connect without issue.
+ const notTooLongAppName = "\u0190".repeat(64);
+ const db = new Mongo(uri + "?appName=" + notTooLongAppName).getDB("test");
+ assert.commandWorked(db.coll.insert({}));
+ assertProfileOnlyContainsAppName(db, notTooLongAppName);
+});
- tests.forEach((test) => {
- const db = conn.getDB("test");
- db.dropDatabase();
- // Entries in db.system.profile have application name.
- db.setProfilingLevel(2);
- test();
- });
+tests.forEach((test) => {
+ const db = conn.getDB("test");
+ db.dropDatabase();
+ // Entries in db.system.profile have application name.
+ db.setProfilingLevel(2);
+ test();
+});
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/shell_can_retry_writes.js b/jstests/noPassthrough/shell_can_retry_writes.js
index 60ef8df9cd1..e07b64e287f 100644
--- a/jstests/noPassthrough/shell_can_retry_writes.js
+++ b/jstests/noPassthrough/shell_can_retry_writes.js
@@ -4,169 +4,169 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const db = primary.startSession({retryWrites: true}).getDatabase("test");
- const coll = db.shell_can_retry_writes;
+const primary = rst.getPrimary();
+const db = primary.startSession({retryWrites: true}).getDatabase("test");
+const coll = db.shell_can_retry_writes;
- function testCommandCanBeRetried(func, expected = true) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+function testCommandCanBeRetried(func, expected = true) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const sentinel = {};
- let cmdObjSeen = sentinel;
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- try {
- assert.doesNotThrow(func);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
+ try {
+ assert.doesNotThrow(func);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
+ }
- if (cmdObjSeen === sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
- }
+ if (cmdObjSeen === sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
+ }
- let cmdName = Object.keys(cmdObjSeen)[0];
+ let cmdName = Object.keys(cmdObjSeen)[0];
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- if (cmdName === "query" || cmdName === "$query") {
- cmdObjSeen = cmdObjSeen[cmdName];
- cmdName = Object.keys(cmdObjSeen)[0];
- }
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObjSeen = cmdObjSeen[cmdName];
+ cmdName = Object.keys(cmdObjSeen)[0];
+ }
- assert(cmdObjSeen.hasOwnProperty("lsid"),
- "Expected operation " + tojson(cmdObjSeen) + " to have a logical session id: " +
+ assert(cmdObjSeen.hasOwnProperty("lsid"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to have a logical session id: " + func.toString());
+
+ if (expected) {
+ assert(
+ cmdObjSeen.hasOwnProperty("txnNumber"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to be assigned a transaction number since it can be retried: " + func.toString());
+ } else {
+ assert(!cmdObjSeen.hasOwnProperty("txnNumber"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to not be assigned a transaction number since it cannot be retried: " +
func.toString());
-
- if (expected) {
- assert(cmdObjSeen.hasOwnProperty("txnNumber"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to be assigned a transaction number since it can be retried: " +
- func.toString());
- } else {
- assert(!cmdObjSeen.hasOwnProperty("txnNumber"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to not be assigned a transaction number since it cannot be retried: " +
- func.toString());
- }
}
+}
- testCommandCanBeRetried(function() {
- coll.insertOne({_id: 0});
- });
+testCommandCanBeRetried(function() {
+ coll.insertOne({_id: 0});
+});
- testCommandCanBeRetried(function() {
- coll.updateOne({_id: 0}, {$set: {a: 1}});
- });
+testCommandCanBeRetried(function() {
+ coll.updateOne({_id: 0}, {$set: {a: 1}});
+});
- testCommandCanBeRetried(function() {
- coll.updateOne({_id: 1}, {$set: {a: 2}}, {upsert: true});
- });
+testCommandCanBeRetried(function() {
+ coll.updateOne({_id: 1}, {$set: {a: 2}}, {upsert: true});
+});
- testCommandCanBeRetried(function() {
- coll.deleteOne({_id: 1});
- });
+testCommandCanBeRetried(function() {
+ coll.deleteOne({_id: 1});
+});
- testCommandCanBeRetried(function() {
- coll.insertMany([{_id: 2, b: 3}, {_id: 3, b: 4}], {ordered: true});
- });
+testCommandCanBeRetried(function() {
+ coll.insertMany([{_id: 2, b: 3}, {_id: 3, b: 4}], {ordered: true});
+});
- testCommandCanBeRetried(function() {
- coll.insertMany([{_id: 4}, {_id: 5}], {ordered: false});
- });
+testCommandCanBeRetried(function() {
+ coll.insertMany([{_id: 4}, {_id: 5}], {ordered: false});
+});
- testCommandCanBeRetried(function() {
- coll.updateMany({a: {$gt: 0}}, {$set: {c: 7}});
- }, false);
+testCommandCanBeRetried(function() {
+ coll.updateMany({a: {$gt: 0}}, {$set: {c: 7}});
+}, false);
- testCommandCanBeRetried(function() {
- coll.deleteMany({b: {$lt: 5}});
- }, false);
+testCommandCanBeRetried(function() {
+ coll.deleteMany({b: {$lt: 5}});
+}, false);
- //
- // Tests for writeConcern.
- //
+//
+// Tests for writeConcern.
+//
- testCommandCanBeRetried(function() {
- coll.insertOne({_id: 1}, {w: 1});
- });
+testCommandCanBeRetried(function() {
+ coll.insertOne({_id: 1}, {w: 1});
+});
- testCommandCanBeRetried(function() {
- coll.insertOne({_id: "majority"}, {w: "majority"});
- });
+testCommandCanBeRetried(function() {
+ coll.insertOne({_id: "majority"}, {w: "majority"});
+});
- //
- // Tests for bulkWrite().
- //
+//
+// Tests for bulkWrite().
+//
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{insertOne: {document: {_id: 10}}}]);
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{insertOne: {document: {_id: 10}}}]);
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{updateOne: {filter: {_id: 10}, update: {$set: {a: 1}}}}]);
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{updateOne: {filter: {_id: 10}, update: {$set: {a: 1}}}}]);
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{updateOne: {filter: {_id: 10}, update: {$set: {a: 2}}, upsert: true}}]);
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{updateOne: {filter: {_id: 10}, update: {$set: {a: 2}}, upsert: true}}]);
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{deleteOne: {filter: {_id: 10}}}]);
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{deleteOne: {filter: {_id: 10}}}]);
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite(
- [{insertOne: {document: {_id: 20, b: 3}}}, {insertOne: {document: {_id: 30, b: 4}}}],
- {ordered: true});
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite(
+ [{insertOne: {document: {_id: 20, b: 3}}}, {insertOne: {document: {_id: 30, b: 4}}}],
+ {ordered: true});
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{insertOne: {document: {_id: 40}}}, {insertOne: {document: {_id: 50}}}],
- {ordered: false});
- });
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{insertOne: {document: {_id: 40}}}, {insertOne: {document: {_id: 50}}}],
+ {ordered: false});
+});
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{updateMany: {filter: {a: {$gt: 0}}, update: {$set: {c: 7}}}}]);
- }, false);
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{updateMany: {filter: {a: {$gt: 0}}, update: {$set: {c: 7}}}}]);
+}, false);
- testCommandCanBeRetried(function() {
- coll.bulkWrite([{deleteMany: {filter: {b: {$lt: 5}}}}]);
- }, false);
+testCommandCanBeRetried(function() {
+ coll.bulkWrite([{deleteMany: {filter: {b: {$lt: 5}}}}]);
+}, false);
- //
- // Tests for wrappers around "findAndModify" command.
- //
+//
+// Tests for wrappers around "findAndModify" command.
+//
- testCommandCanBeRetried(function() {
- coll.findOneAndUpdate({_id: 100}, {$set: {d: 9}}, {upsert: true});
- });
+testCommandCanBeRetried(function() {
+ coll.findOneAndUpdate({_id: 100}, {$set: {d: 9}}, {upsert: true});
+});
- testCommandCanBeRetried(function() {
- coll.findOneAndReplace({_id: 100}, {e: 11});
- });
+testCommandCanBeRetried(function() {
+ coll.findOneAndReplace({_id: 100}, {e: 11});
+});
- testCommandCanBeRetried(function() {
- coll.findOneAndDelete({e: {$exists: true}});
- });
+testCommandCanBeRetried(function() {
+ coll.findOneAndDelete({e: {$exists: true}});
+});
- db.getSession().endSession();
- rst.stopSet();
+db.getSession().endSession();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/shell_can_use_read_concern.js b/jstests/noPassthrough/shell_can_use_read_concern.js
index 183da4686ec..f3d567960e0 100644
--- a/jstests/noPassthrough/shell_can_use_read_concern.js
+++ b/jstests/noPassthrough/shell_can_use_read_concern.js
@@ -4,231 +4,226 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- // This test makes assertions on commands run without logical session ids.
- TestData.disableImplicitSessions = true;
+// This test makes assertions on commands run without logical session ids.
+TestData.disableImplicitSessions = true;
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
+const primary = rst.getPrimary();
- function runTests({withSession}) {
- let db;
+function runTests({withSession}) {
+ let db;
- if (withSession) {
- primary.setCausalConsistency(false);
- db = primary.startSession({causalConsistency: true}).getDatabase("test");
- } else {
- primary.setCausalConsistency(true);
- db = primary.getDB("test");
+ if (withSession) {
+ primary.setCausalConsistency(false);
+ db = primary.startSession({causalConsistency: true}).getDatabase("test");
+ } else {
+ primary.setCausalConsistency(true);
+ db = primary.getDB("test");
+ }
+
+ const coll = db.shell_can_use_read_concern;
+ coll.drop();
+
+ function testCommandCanBeCausallyConsistent(func, {
+ expectedSession: expectedSession = withSession,
+ expectedAfterClusterTime: expectedAfterClusterTime = true
+ } = {}) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
+
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
+
+ try {
+ assert.doesNotThrow(func);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
}
- const coll = db.shell_can_use_read_concern;
- coll.drop();
-
- function testCommandCanBeCausallyConsistent(func, {
- expectedSession: expectedSession = withSession,
- expectedAfterClusterTime: expectedAfterClusterTime = true
- } = {}) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
-
- const sentinel = {};
- let cmdObjSeen = sentinel;
-
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
-
- try {
- assert.doesNotThrow(func);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
-
- if (cmdObjSeen === sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " +
- func.toString());
- }
-
- let cmdName = Object.keys(cmdObjSeen)[0];
-
- // If the command is in a wrapped form, then we look for the actual command object
- // inside
- // the query/$query object.
- if (cmdName === "query" || cmdName === "$query") {
- cmdObjSeen = cmdObjSeen[cmdName];
- cmdName = Object.keys(cmdObjSeen)[0];
- }
-
- if (expectedSession) {
- assert(cmdObjSeen.hasOwnProperty("lsid"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to have a logical session id: " + func.toString());
- } else {
- assert(!cmdObjSeen.hasOwnProperty("lsid"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to not have a logical session id: " + func.toString());
- }
-
- if (expectedAfterClusterTime) {
- assert(cmdObjSeen.hasOwnProperty("readConcern"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to have a readConcern object since it can be causally consistent: " +
- func.toString());
-
- const readConcern = cmdObjSeen.readConcern;
- assert(readConcern.hasOwnProperty("afterClusterTime"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to specify afterClusterTime since it can be causally consistent: " +
- func.toString());
- } else {
- assert(!cmdObjSeen.hasOwnProperty("readConcern"),
- "Expected operation " + tojson(cmdObjSeen) + " to not have a readConcern" +
- " object since it cannot be causally consistent: " + func.toString());
- }
+ if (cmdObjSeen === sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
}
- //
- // Tests for the "find" and "getMore" commands.
- //
-
- {
- testCommandCanBeCausallyConsistent(function() {
- assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
- }, {expectedSession: withSession, expectedAfterClusterTime: false});
-
- testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(
- db.runCommand({find: coll.getName(), batchSize: 5, singleBatch: true}));
- });
-
- const cursor = coll.find().batchSize(2);
-
- testCommandCanBeCausallyConsistent(function() {
- cursor.next();
- cursor.next();
- });
-
- testCommandCanBeCausallyConsistent(function() {
- cursor.next();
- cursor.next();
- cursor.next();
- assert(!cursor.hasNext());
- }, {
- expectedSession: withSession,
- expectedAfterClusterTime: false,
- });
+ let cmdName = Object.keys(cmdObjSeen)[0];
+
+ // If the command is in a wrapped form, then we look for the actual command object
+ // inside
+ // the query/$query object.
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObjSeen = cmdObjSeen[cmdName];
+ cmdName = Object.keys(cmdObjSeen)[0];
}
- //
- // Tests for the "count" command.
- //
+ if (expectedSession) {
+ assert(cmdObjSeen.hasOwnProperty("lsid"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to have a logical session id: " + func.toString());
+ } else {
+ assert(!cmdObjSeen.hasOwnProperty("lsid"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to not have a logical session id: " + func.toString());
+ }
- testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({count: coll.getName()}));
- });
+ if (expectedAfterClusterTime) {
+ assert(cmdObjSeen.hasOwnProperty("readConcern"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to have a readConcern object since it can be causally consistent: " +
+ func.toString());
+
+ const readConcern = cmdObjSeen.readConcern;
+ assert(readConcern.hasOwnProperty("afterClusterTime"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to specify afterClusterTime since it can be causally consistent: " +
+ func.toString());
+ } else {
+ assert(!cmdObjSeen.hasOwnProperty("readConcern"),
+ "Expected operation " + tojson(cmdObjSeen) + " to not have a readConcern" +
+ " object since it cannot be causally consistent: " + func.toString());
+ }
+ }
- testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({query: {count: coll.getName()}}));
- });
+ //
+ // Tests for the "find" and "getMore" commands.
+ //
+ {
testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({$query: {count: coll.getName()}}));
- });
+ assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
+ }, {expectedSession: withSession, expectedAfterClusterTime: false});
testCommandCanBeCausallyConsistent(function() {
- assert.eq(5, coll.count());
+ assert.commandWorked(
+ db.runCommand({find: coll.getName(), batchSize: 5, singleBatch: true}));
});
- //
- // Tests for the "distinct" command.
- //
+ const cursor = coll.find().batchSize(2);
testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({distinct: coll.getName(), key: "_id"}));
+ cursor.next();
+ cursor.next();
});
testCommandCanBeCausallyConsistent(function() {
- const values = coll.distinct("_id");
- assert.eq(5, values.length, tojson(values));
+ cursor.next();
+ cursor.next();
+ cursor.next();
+ assert(!cursor.hasNext());
+ }, {
+ expectedSession: withSession,
+ expectedAfterClusterTime: false,
});
+ }
- //
- // Tests for the "aggregate" command.
- //
-
- {
- testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand(
- {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 5}}));
- });
-
- testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [],
- cursor: {batchSize: 5},
- explain: true
- }));
- });
-
- let cursor;
-
- testCommandCanBeCausallyConsistent(function() {
- cursor = coll.aggregate([], {cursor: {batchSize: 2}});
- cursor.next();
- cursor.next();
- });
-
- testCommandCanBeCausallyConsistent(function() {
- cursor.next();
- cursor.next();
- cursor.next();
- assert(!cursor.hasNext());
- }, {
- expectedSession: withSession,
- expectedAfterClusterTime: false,
- });
- }
+ //
+ // Tests for the "count" command.
+ //
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(db.runCommand({count: coll.getName()}));
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(db.runCommand({query: {count: coll.getName()}}));
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(db.runCommand({$query: {count: coll.getName()}}));
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.eq(5, coll.count());
+ });
- //
- // Tests for the "geoSearch" command.
- //
+ //
+ // Tests for the "distinct" command.
+ //
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(db.runCommand({distinct: coll.getName(), key: "_id"}));
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ const values = coll.distinct("_id");
+ assert.eq(5, values.length, tojson(values));
+ });
+
+ //
+ // Tests for the "aggregate" command.
+ //
+
+ {
testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(coll.createIndex({loc: "geoHaystack", other: 1}, {bucketSize: 1}));
- }, {expectedSession: withSession, expectedAfterClusterTime: false});
+ assert.commandWorked(
+ db.runCommand({aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 5}}));
+ });
testCommandCanBeCausallyConsistent(function() {
assert.commandWorked(db.runCommand(
- {geoSearch: coll.getName(), near: [0, 0], maxDistance: 1, search: {}}));
+ {aggregate: coll.getName(), pipeline: [], cursor: {batchSize: 5}, explain: true}));
});
- //
- // Tests for the "explain" command.
- //
+ let cursor;
testCommandCanBeCausallyConsistent(function() {
- assert.commandWorked(db.runCommand({explain: {find: coll.getName()}}));
+ cursor = coll.aggregate([], {cursor: {batchSize: 2}});
+ cursor.next();
+ cursor.next();
});
testCommandCanBeCausallyConsistent(function() {
- coll.find().explain();
+ cursor.next();
+ cursor.next();
+ cursor.next();
+ assert(!cursor.hasNext());
+ }, {
+ expectedSession: withSession,
+ expectedAfterClusterTime: false,
});
+ }
- testCommandCanBeCausallyConsistent(function() {
- coll.explain().find().finish();
- });
+ //
+ // Tests for the "geoSearch" command.
+ //
- db.getSession().endSession();
- }
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(coll.createIndex({loc: "geoHaystack", other: 1}, {bucketSize: 1}));
+ }, {expectedSession: withSession, expectedAfterClusterTime: false});
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(
+ db.runCommand({geoSearch: coll.getName(), near: [0, 0], maxDistance: 1, search: {}}));
+ });
+
+ //
+ // Tests for the "explain" command.
+ //
+
+ testCommandCanBeCausallyConsistent(function() {
+ assert.commandWorked(db.runCommand({explain: {find: coll.getName()}}));
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ coll.find().explain();
+ });
+
+ testCommandCanBeCausallyConsistent(function() {
+ coll.explain().find().finish();
+ });
+
+ db.getSession().endSession();
+}
- runTests({withSession: false});
- runTests({withSession: true});
+runTests({withSession: false});
+runTests({withSession: true});
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/shell_check_program_extension.js b/jstests/noPassthrough/shell_check_program_extension.js
index c8932b212c5..72b5b47b645 100644
--- a/jstests/noPassthrough/shell_check_program_extension.js
+++ b/jstests/noPassthrough/shell_check_program_extension.js
@@ -3,15 +3,15 @@
*/
(function() {
- 'use strict';
+'use strict';
- if (_isWindows()) {
- const filename = 'jstests/noPassthrough/libs/testWindowsExtension.bat';
+if (_isWindows()) {
+ const filename = 'jstests/noPassthrough/libs/testWindowsExtension.bat';
- clearRawMongoProgramOutput();
- const result = runMongoProgram(filename);
- assert.eq(result, 42);
- } else {
- jsTestLog("This test is only relevant for Windows environments.");
- }
+ clearRawMongoProgramOutput();
+ const result = runMongoProgram(filename);
+ assert.eq(result, 42);
+} else {
+ jsTestLog("This test is only relevant for Windows environments.");
+}
})();
diff --git a/jstests/noPassthrough/shell_cmd_assertions.js b/jstests/noPassthrough/shell_cmd_assertions.js
index de61b88355c..4bc800663f8 100644
--- a/jstests/noPassthrough/shell_cmd_assertions.js
+++ b/jstests/noPassthrough/shell_cmd_assertions.js
@@ -3,360 +3,357 @@
*/
(function() {
- "use strict";
-
- const conn = MongoRunner.runMongod();
- const db = conn.getDB("commandAssertions");
- const kFakeErrCode = 1234567890;
- const tests = [];
-
- const sampleWriteConcernError = {
- n: 1,
- ok: 1,
- writeConcernError: {
- code: ErrorCodes.WriteConcernFailed,
- codeName: "WriteConcernFailed",
- errmsg: "waiting for replication timed out",
- errInfo: {
- wtimeout: true,
- },
+"use strict";
+
+const conn = MongoRunner.runMongod();
+const db = conn.getDB("commandAssertions");
+const kFakeErrCode = 1234567890;
+const tests = [];
+
+const sampleWriteConcernError = {
+ n: 1,
+ ok: 1,
+ writeConcernError: {
+ code: ErrorCodes.WriteConcernFailed,
+ codeName: "WriteConcernFailed",
+ errmsg: "waiting for replication timed out",
+ errInfo: {
+ wtimeout: true,
},
- };
-
- function setup() {
- db.coll.drop();
- assert.writeOK(db.coll.insert({_id: 1}));
- }
-
- // Raw command responses.
- tests.push(function rawCommandOk() {
- const res = db.runCommand({"ping": 1});
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- });
-
- function _assertMsgFunctionExecution(
- assertFunc, assertParameter, {expectException: expectException = false} = {}) {
- var msgFunctionCalled = false;
- var expectedAssert = assert.doesNotThrow;
-
- if (expectException) {
- expectedAssert = assert.throws;
- }
-
- expectedAssert(() => {
- assertFunc(assertParameter, () => {
- msgFunctionCalled = true;
- });
- });
-
- assert.eq(
- expectException, msgFunctionCalled, "msg function execution should match assertion");
+ },
+};
+
+function setup() {
+ db.coll.drop();
+ assert.writeOK(db.coll.insert({_id: 1}));
+}
+
+// Raw command responses.
+tests.push(function rawCommandOk() {
+ const res = db.runCommand({"ping": 1});
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+});
+
+function _assertMsgFunctionExecution(
+ assertFunc, assertParameter, {expectException: expectException = false} = {}) {
+ var msgFunctionCalled = false;
+ var expectedAssert = assert.doesNotThrow;
+
+ if (expectException) {
+ expectedAssert = assert.throws;
}
- tests.push(function msgFunctionOnlyCalledOnFailure() {
- const res = db.runCommand({"ping": 1});
-
- _assertMsgFunctionExecution(assert.commandWorked, res, {expectException: false});
- _assertMsgFunctionExecution(
- assert.commandWorkedIgnoringWriteErrors, res, {expectException: false});
- _assertMsgFunctionExecution(assert.commandFailed, res, {expectException: true});
-
- var msgFunctionCalled = false;
- assert.throws(() => assert.commandFailedWithCode(res, 0, () => {
+ expectedAssert(() => {
+ assertFunc(assertParameter, () => {
msgFunctionCalled = true;
- }));
- assert.eq(true, msgFunctionCalled, "msg function execution should match assertion");
- });
-
- tests.push(function rawCommandErr() {
- const res = db.runCommand({"IHopeNobodyEverMakesThisACommand": 1});
- assert.throws(() => assert.commandWorked(res));
- assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.CommandNotFound));
- // commandFailedWithCode should succeed if any of the passed error codes are matched.
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.CommandNotFound, kFakeErrCode]));
- assert.doesNotThrow(() => assert.commandWorkedOrFailedWithCode(
- res,
- [ErrorCodes.CommandNotFound, kFakeErrCode],
- "threw even though failed with correct error codes"));
- assert.throws(
- () => assert.commandWorkedOrFailedWithCode(
- res, [kFakeErrCode], "didn't throw even though failed with incorrect error code"));
- });
-
- tests.push(function rawCommandWriteOk() {
- const res = db.runCommand({insert: "coll", documents: [{_id: 2}]});
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- assert.doesNotThrow(
- () => assert.commandWorkedOrFailedWithCode(res, 0, "threw even though succeeded"));
- });
-
- tests.push(function rawCommandWriteErr() {
- const res = db.runCommand({insert: "coll", documents: [{_id: 1}]});
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
- assert.throws(
- () => assert.commandWorkedOrFailedWithCode(
- res, [ErrorCodes.DuplicateKey, kFakeErrCode], "expected to throw on write error"));
- assert.throws(() => assert.commandWorkedOrFailedWithCode(
- res, [kFakeErrCode], "expected to throw on write error"));
- });
-
- tests.push(function collInsertWriteOk() {
- const res = db.coll.insert({_id: 2});
- assert(res instanceof WriteResult);
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- });
-
- tests.push(function collInsertWriteErr() {
- const res = db.coll.insert({_id: 1});
- assert(res instanceof WriteResult);
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
- });
-
- tests.push(function collMultiInsertWriteOk() {
- const res = db.coll.insert([{_id: 3}, {_id: 2}]);
- assert(res instanceof BulkWriteResult);
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- assert.throws(
- () => assert.commandWorkedOrFailedWithCode(res, 0, "threw even though succeeded"));
- });
-
- tests.push(function collMultiInsertWriteErr() {
- const res = db.coll.insert([{_id: 1}, {_id: 2}]);
- assert(res instanceof BulkWriteResult);
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
- });
-
- // Test when the insert command fails with ok:0 (i.e. not failing due to write err)
- tests.push(function collInsertCmdErr() {
- const res = db.coll.insert({x: 1}, {writeConcern: {"bad": 1}});
- assert(res instanceof WriteCommandError);
- assert.throws(() => assert.commandWorked(res));
- assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.FailedToParse));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.FailedToParse, kFakeErrCode]));
- });
-
- tests.push(function collMultiInsertCmdErr() {
- const res = db.coll.insert([{x: 1}, {x: 2}], {writeConcern: {"bad": 1}});
- assert(res instanceof WriteCommandError);
- assert.throws(() => assert.commandWorked(res));
- assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.FailedToParse));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.FailedToParse, kFakeErrCode]));
- assert.doesNotThrow(() => assert.commandWorkedOrFailedWithCode(
- res,
- [ErrorCodes.FailedToParse, kFakeErrCode],
- "threw even though failed with correct error codes"));
- assert.throws(
- () => assert.commandWorkedOrFailedWithCode(
- res, [kFakeErrCode], "didn't throw even though failed with incorrect error codes"));
- });
-
- tests.push(function mapReduceOk() {
- const res = db.coll.mapReduce(
- function() {
- emit(this._id, 0);
- },
- function(k, v) {
- return v[0];
- },
- {out: "coll_out"});
- assert(res instanceof MapReduceResult);
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- });
-
- tests.push(function mapReduceErr() {
- // db.coll.mapReduce throws if the command response has ok:0
- // Instead manually construct a MapReduceResult with ok:0
- const res = new MapReduceResult(db, {
- "ok": 0,
- "errmsg": "Example Error",
- "code": ErrorCodes.JSInterpreterFailure,
- "codeName": "JSInterpreterFailure"
});
- assert.throws(() => assert.commandWorked(res));
- assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() =>
- assert.commandFailedWithCode(res, ErrorCodes.JSInterpreterFailure));
- assert.doesNotThrow(() => assert.commandFailedWithCode(
- res, [ErrorCodes.JSInterpreterFailure, kFakeErrCode]));
});
- tests.push(function crudInsertOneOk() {
- const res = db.coll.insertOne({_id: 2});
- assert(res.hasOwnProperty("acknowledged"));
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
- });
-
- tests.push(function crudInsertOneErr() {
- let threw = false;
- let res = null;
- try {
- db.coll.insertOne({_id: 1});
- } catch (e) {
- threw = true;
- res = e;
- }
- assert(threw);
- assert(res instanceof WriteError);
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
- });
-
- tests.push(function crudInsertManyOk() {
- const res = db.coll.insertMany([{_id: 2}, {_id: 3}]);
- assert(res.hasOwnProperty("acknowledged"));
- assert.doesNotThrow(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.throws(() => assert.commandFailed(res));
- assert.throws(() => assert.commandFailedWithCode(res, 0));
+ assert.eq(expectException, msgFunctionCalled, "msg function execution should match assertion");
+}
+
+tests.push(function msgFunctionOnlyCalledOnFailure() {
+ const res = db.runCommand({"ping": 1});
+
+ _assertMsgFunctionExecution(assert.commandWorked, res, {expectException: false});
+ _assertMsgFunctionExecution(
+ assert.commandWorkedIgnoringWriteErrors, res, {expectException: false});
+ _assertMsgFunctionExecution(assert.commandFailed, res, {expectException: true});
+
+ var msgFunctionCalled = false;
+ assert.throws(() => assert.commandFailedWithCode(res, 0, () => {
+ msgFunctionCalled = true;
+ }));
+ assert.eq(true, msgFunctionCalled, "msg function execution should match assertion");
+});
+
+tests.push(function rawCommandErr() {
+ const res = db.runCommand({"IHopeNobodyEverMakesThisACommand": 1});
+ assert.throws(() => assert.commandWorked(res));
+ assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.CommandNotFound));
+ // commandFailedWithCode should succeed if any of the passed error codes are matched.
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.CommandNotFound, kFakeErrCode]));
+ assert.doesNotThrow(() => assert.commandWorkedOrFailedWithCode(
+ res,
+ [ErrorCodes.CommandNotFound, kFakeErrCode],
+ "threw even though failed with correct error codes"));
+ assert.throws(
+ () => assert.commandWorkedOrFailedWithCode(
+ res, [kFakeErrCode], "didn't throw even though failed with incorrect error code"));
+});
+
+tests.push(function rawCommandWriteOk() {
+ const res = db.runCommand({insert: "coll", documents: [{_id: 2}]});
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+ assert.doesNotThrow(
+ () => assert.commandWorkedOrFailedWithCode(res, 0, "threw even though succeeded"));
+});
+
+tests.push(function rawCommandWriteErr() {
+ const res = db.runCommand({insert: "coll", documents: [{_id: 1}]});
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+ assert.throws(
+ () => assert.commandWorkedOrFailedWithCode(
+ res, [ErrorCodes.DuplicateKey, kFakeErrCode], "expected to throw on write error"));
+ assert.throws(() => assert.commandWorkedOrFailedWithCode(
+ res, [kFakeErrCode], "expected to throw on write error"));
+});
+
+tests.push(function collInsertWriteOk() {
+ const res = db.coll.insert({_id: 2});
+ assert(res instanceof WriteResult);
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+});
+
+tests.push(function collInsertWriteErr() {
+ const res = db.coll.insert({_id: 1});
+ assert(res instanceof WriteResult);
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+tests.push(function collMultiInsertWriteOk() {
+ const res = db.coll.insert([{_id: 3}, {_id: 2}]);
+ assert(res instanceof BulkWriteResult);
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+ assert.throws(() =>
+ assert.commandWorkedOrFailedWithCode(res, 0, "threw even though succeeded"));
+});
+
+tests.push(function collMultiInsertWriteErr() {
+ const res = db.coll.insert([{_id: 1}, {_id: 2}]);
+ assert(res instanceof BulkWriteResult);
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+// Test when the insert command fails with ok:0 (i.e. not failing due to write err)
+tests.push(function collInsertCmdErr() {
+ const res = db.coll.insert({x: 1}, {writeConcern: {"bad": 1}});
+ assert(res instanceof WriteCommandError);
+ assert.throws(() => assert.commandWorked(res));
+ assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.FailedToParse));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.FailedToParse, kFakeErrCode]));
+});
+
+tests.push(function collMultiInsertCmdErr() {
+ const res = db.coll.insert([{x: 1}, {x: 2}], {writeConcern: {"bad": 1}});
+ assert(res instanceof WriteCommandError);
+ assert.throws(() => assert.commandWorked(res));
+ assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.FailedToParse));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.FailedToParse, kFakeErrCode]));
+ assert.doesNotThrow(() => assert.commandWorkedOrFailedWithCode(
+ res,
+ [ErrorCodes.FailedToParse, kFakeErrCode],
+ "threw even though failed with correct error codes"));
+ assert.throws(
+ () => assert.commandWorkedOrFailedWithCode(
+ res, [kFakeErrCode], "didn't throw even though failed with incorrect error codes"));
+});
+
+tests.push(function mapReduceOk() {
+ const res = db.coll.mapReduce(
+ function() {
+ emit(this._id, 0);
+ },
+ function(k, v) {
+ return v[0];
+ },
+ {out: "coll_out"});
+ assert(res instanceof MapReduceResult);
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+});
+
+tests.push(function mapReduceErr() {
+ // db.coll.mapReduce throws if the command response has ok:0
+ // Instead manually construct a MapReduceResult with ok:0
+ const res = new MapReduceResult(db, {
+ "ok": 0,
+ "errmsg": "Example Error",
+ "code": ErrorCodes.JSInterpreterFailure,
+ "codeName": "JSInterpreterFailure"
});
-
- tests.push(function crudInsertManyErr() {
- let threw = false;
- let res = null;
- try {
- db.coll.insertMany([{_id: 1}, {_id: 2}]);
- } catch (e) {
- threw = true;
- res = e;
- }
- assert(threw);
- assert(res instanceof BulkWriteError);
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+ assert.throws(() => assert.commandWorked(res));
+ assert.throws(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.JSInterpreterFailure));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.JSInterpreterFailure, kFakeErrCode]));
+});
+
+tests.push(function crudInsertOneOk() {
+ const res = db.coll.insertOne({_id: 2});
+ assert(res.hasOwnProperty("acknowledged"));
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+});
+
+tests.push(function crudInsertOneErr() {
+ let threw = false;
+ let res = null;
+ try {
+ db.coll.insertOne({_id: 1});
+ } catch (e) {
+ threw = true;
+ res = e;
+ }
+ assert(threw);
+ assert(res instanceof WriteError);
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+tests.push(function crudInsertManyOk() {
+ const res = db.coll.insertMany([{_id: 2}, {_id: 3}]);
+ assert(res.hasOwnProperty("acknowledged"));
+ assert.doesNotThrow(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.throws(() => assert.commandFailed(res));
+ assert.throws(() => assert.commandFailedWithCode(res, 0));
+});
+
+tests.push(function crudInsertManyErr() {
+ let threw = false;
+ let res = null;
+ try {
+ db.coll.insertMany([{_id: 1}, {_id: 2}]);
+ } catch (e) {
+ threw = true;
+ res = e;
+ }
+ assert(threw);
+ assert(res instanceof BulkWriteError);
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+tests.push(function rawMultiWriteErr() {
+ // Do an unordered bulk insert with duplicate keys to produce multiple write errors.
+ const res = db.runCommand({"insert": "coll", documents: [{_id: 1}, {_id: 1}], ordered: false});
+ assert(res.writeErrors.length == 2, "did not get multiple write errors");
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+tests.push(function bulkMultiWriteErr() {
+ // Do an unordered bulk insert with duplicate keys to produce multiple write errors.
+ const res = db.coll.insert([{_id: 1}, {_id: 1}], {ordered: false});
+ assert.throws(() => assert.commandWorked(res));
+ assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
+ assert.doesNotThrow(() => assert.commandFailed(res));
+ assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
+ assert.doesNotThrow(
+ () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+});
+
+tests.push(function writeConcernErrorCausesCommandWorkedToAssert() {
+ const result = sampleWriteConcernError;
+
+ assert.throws(() => {
+ assert.commandWorked(result);
});
+});
- tests.push(function rawMultiWriteErr() {
- // Do an unordered bulk insert with duplicate keys to produce multiple write errors.
- const res =
- db.runCommand({"insert": "coll", documents: [{_id: 1}, {_id: 1}], ordered: false});
- assert(res.writeErrors.length == 2, "did not get multiple write errors");
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
- });
+tests.push(function writeConcernErrorCausesCommandFailedToPass() {
+ const result = sampleWriteConcernError;
- tests.push(function bulkMultiWriteErr() {
- // Do an unordered bulk insert with duplicate keys to produce multiple write errors.
- const res = db.coll.insert([{_id: 1}, {_id: 1}], {ordered: false});
- assert.throws(() => assert.commandWorked(res));
- assert.doesNotThrow(() => assert.commandWorkedIgnoringWriteErrors(res));
- assert.doesNotThrow(() => assert.commandFailed(res));
- assert.doesNotThrow(() => assert.commandFailedWithCode(res, ErrorCodes.DuplicateKey));
- assert.doesNotThrow(
- () => assert.commandFailedWithCode(res, [ErrorCodes.DuplicateKey, kFakeErrCode]));
+ assert.doesNotThrow(() => {
+ assert.commandFailed(result);
+ assert.commandFailedWithCode(result, ErrorCodes.WriteConcernFailed);
});
+});
- tests.push(function writeConcernErrorCausesCommandWorkedToAssert() {
- const result = sampleWriteConcernError;
+tests.push(function writeConcernErrorCanBeIgnored() {
+ const result = sampleWriteConcernError;
- assert.throws(() => {
- assert.commandWorked(result);
- });
+ assert.doesNotThrow(() => {
+ assert.commandWorkedIgnoringWriteConcernErrors(result);
});
+});
- tests.push(function writeConcernErrorCausesCommandFailedToPass() {
- const result = sampleWriteConcernError;
+tests.push(function invalidResponsesAttemptToProvideInformationToCommandWorks() {
+ const invalidResponses = [undefined, 'not a valid response', 42];
- assert.doesNotThrow(() => {
- assert.commandFailed(result);
- assert.commandFailedWithCode(result, ErrorCodes.WriteConcernFailed);
+ invalidResponses.forEach((invalidRes) => {
+ const error = assert.throws(() => {
+ assert.commandWorked(invalidRes);
});
- });
-
- tests.push(function writeConcernErrorCanBeIgnored() {
- const result = sampleWriteConcernError;
- assert.doesNotThrow(() => {
- assert.commandWorkedIgnoringWriteConcernErrors(result);
- });
+ assert.gte(error.message.indexOf(invalidRes), 0);
+ assert.gte(error.message.indexOf(typeof invalidRes), 0);
});
+});
- tests.push(function invalidResponsesAttemptToProvideInformationToCommandWorks() {
- const invalidResponses = [undefined, 'not a valid response', 42];
+tests.push(function invalidResponsesAttemptToProvideInformationCommandFailed() {
+ const invalidResponses = [undefined, 'not a valid response', 42];
- invalidResponses.forEach((invalidRes) => {
- const error = assert.throws(() => {
- assert.commandWorked(invalidRes);
- });
-
- assert.gte(error.message.indexOf(invalidRes), 0);
- assert.gte(error.message.indexOf(typeof invalidRes), 0);
+ invalidResponses.forEach((invalidRes) => {
+ const error = assert.throws(() => {
+ assert.commandFailed(invalidRes);
});
- });
-
- tests.push(function invalidResponsesAttemptToProvideInformationCommandFailed() {
- const invalidResponses = [undefined, 'not a valid response', 42];
- invalidResponses.forEach((invalidRes) => {
- const error = assert.throws(() => {
- assert.commandFailed(invalidRes);
- });
-
- assert.gte(error.message.indexOf(invalidRes), 0);
- assert.gte(error.message.indexOf(typeof invalidRes), 0);
- });
+ assert.gte(error.message.indexOf(invalidRes), 0);
+ assert.gte(error.message.indexOf(typeof invalidRes), 0);
});
+});
- tests.forEach((test) => {
- jsTest.log(`Starting test '${test.name}'`);
- setup();
- test();
- });
+tests.forEach((test) => {
+ jsTest.log(`Starting test '${test.name}'`);
+ setup();
+ test();
+});
- /* cleanup */
- MongoRunner.stopMongod(conn);
+/* cleanup */
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/shell_disable_majority_reads.js b/jstests/noPassthrough/shell_disable_majority_reads.js
index 2bd62c46b1c..fa44f462646 100644
--- a/jstests/noPassthrough/shell_disable_majority_reads.js
+++ b/jstests/noPassthrough/shell_disable_majority_reads.js
@@ -2,34 +2,34 @@
// @tags: [requires_wiredtiger, requires_replication, requires_majority_read_concern,
// requires_persistence]
(function() {
- "use strict";
+"use strict";
- // Majority reads are enabled by default.
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+// Majority reads are enabled by default.
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- let serverStatus = rst.getPrimary().getDB("test").serverStatus();
- assert(serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
- rst.stopSet();
+let serverStatus = rst.getPrimary().getDB("test").serverStatus();
+assert(serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
+rst.stopSet();
- // Explicitly enable majority reads.
- TestData.enableMajorityReadConcern = true;
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+// Explicitly enable majority reads.
+TestData.enableMajorityReadConcern = true;
+rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- serverStatus = rst.getPrimary().getDB("test").serverStatus();
- assert(serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
- rst.stopSet();
+serverStatus = rst.getPrimary().getDB("test").serverStatus();
+assert(serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
+rst.stopSet();
- // Explicitly disable majority reads.
- TestData.enableMajorityReadConcern = false;
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+// Explicitly disable majority reads.
+TestData.enableMajorityReadConcern = false;
+rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- serverStatus = rst.getPrimary().getDB("test").serverStatus();
- assert(!serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
- rst.stopSet();
+serverStatus = rst.getPrimary().getDB("test").serverStatus();
+assert(!serverStatus.storageEngine.supportsCommittedReads, tojson(serverStatus));
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/shell_gossip_cluster_time.js b/jstests/noPassthrough/shell_gossip_cluster_time.js
index 462ad7e34da..119ba1e23dc 100644
--- a/jstests/noPassthrough/shell_gossip_cluster_time.js
+++ b/jstests/noPassthrough/shell_gossip_cluster_time.js
@@ -4,129 +4,124 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
+const primary = rst.getPrimary();
- const session1 = primary.startSession();
- const session2 = primary.startSession();
+const session1 = primary.startSession();
+const session2 = primary.startSession();
- const db = primary.getDB("test");
- const coll = db.shell_gossip_cluster_time;
+const db = primary.getDB("test");
+const coll = db.shell_gossip_cluster_time;
- function testCommandGossipedWithClusterTime(func, expectedClusterTime) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+function testCommandGossipedWithClusterTime(func, expectedClusterTime) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const sentinel = {};
- let cmdObjSeen = sentinel;
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- try {
- assert.doesNotThrow(func);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
+ try {
+ assert.doesNotThrow(func);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
+ }
- if (cmdObjSeen === sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
- }
+ if (cmdObjSeen === sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + func.toString());
+ }
- let cmdName = Object.keys(cmdObjSeen)[0];
+ let cmdName = Object.keys(cmdObjSeen)[0];
- // If the command is in a wrapped form, then we look for the actual command object inside
- // the query/$query object.
- if (cmdName === "query" || cmdName === "$query") {
- cmdObjSeen = cmdObjSeen[cmdName];
- cmdName = Object.keys(cmdObjSeen)[0];
- }
+ // If the command is in a wrapped form, then we look for the actual command object inside
+ // the query/$query object.
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObjSeen = cmdObjSeen[cmdName];
+ cmdName = Object.keys(cmdObjSeen)[0];
+ }
- if (expectedClusterTime === undefined) {
- assert(!cmdObjSeen.hasOwnProperty("$clusterTime"),
- "Expected operation " + tojson(cmdObjSeen) +
- " to not have a $clusterTime object: " + func.toString());
- } else {
- assert(cmdObjSeen.hasOwnProperty("$clusterTime"),
- "Expected operation " + tojson(cmdObjSeen) + " to have a $clusterTime object: " +
- func.toString());
+ if (expectedClusterTime === undefined) {
+ assert(!cmdObjSeen.hasOwnProperty("$clusterTime"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to not have a $clusterTime object: " + func.toString());
+ } else {
+ assert(cmdObjSeen.hasOwnProperty("$clusterTime"),
+ "Expected operation " + tojson(cmdObjSeen) +
+ " to have a $clusterTime object: " + func.toString());
- assert(bsonBinaryEqual(expectedClusterTime, cmdObjSeen.$clusterTime));
- }
+ assert(bsonBinaryEqual(expectedClusterTime, cmdObjSeen.$clusterTime));
}
-
- assert(
- session1.getClusterTime() === undefined,
- "session1 has yet to be used, but has clusterTime: " + tojson(session1.getClusterTime()));
- assert(
- session2.getClusterTime() === undefined,
- "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
-
- // Advance the clusterTime outside of either of the sessions.
- testCommandGossipedWithClusterTime(function() {
- assert.writeOK(coll.insert({}));
- }, primary.getClusterTime());
-
- assert(
- session1.getClusterTime() === undefined,
- "session1 has yet to be used, but has clusterTime: " + tojson(session1.getClusterTime()));
- assert(
- session2.getClusterTime() === undefined,
- "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
-
- // Performing an operation with session1 should use the highest clusterTime seen by the client
- // since session1 hasn't been used yet.
- testCommandGossipedWithClusterTime(function() {
- const coll = session1.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
- }, primary.getClusterTime());
-
- assert.eq(session1.getClusterTime(), primary.getClusterTime());
-
- testCommandGossipedWithClusterTime(function() {
- const coll = session1.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
- }, session1.getClusterTime());
-
- assert(
- session2.getClusterTime() === undefined,
- "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
-
- primary.resetClusterTime_forTesting();
- assert(primary.getClusterTime() === undefined,
- "client's cluster time should have been reset, but has clusterTime: " +
- tojson(primary.getClusterTime()));
-
- // Performing an operation with session2 should use the highest clusterTime seen by session2
- // since the client's clusterTime has been reset.
- session2.advanceClusterTime(session1.getClusterTime());
- testCommandGossipedWithClusterTime(function() {
- const coll = session2.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
- }, session2.getClusterTime());
-
- assert.eq(session2.getClusterTime(), primary.getClusterTime());
-
- primary.resetClusterTime_forTesting();
- assert(primary.getClusterTime() === undefined,
- "client's cluster time should have been reset, but has clusterTime: " +
- tojson(primary.getClusterTime()));
-
- // Performing an operation with session2 should use the highest clusterTime seen by session2
- // since the highest clusterTime seen by session1 is behind that of session2's.
- primary.advanceClusterTime(session1.getClusterTime());
- testCommandGossipedWithClusterTime(function() {
- const coll = session2.getDatabase("test").mycoll;
- assert.writeOK(coll.insert({}));
- }, session2.getClusterTime());
-
- rst.stopSet();
+}
+
+assert(session1.getClusterTime() === undefined,
+ "session1 has yet to be used, but has clusterTime: " + tojson(session1.getClusterTime()));
+assert(session2.getClusterTime() === undefined,
+ "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
+
+// Advance the clusterTime outside of either of the sessions.
+testCommandGossipedWithClusterTime(function() {
+ assert.writeOK(coll.insert({}));
+}, primary.getClusterTime());
+
+assert(session1.getClusterTime() === undefined,
+ "session1 has yet to be used, but has clusterTime: " + tojson(session1.getClusterTime()));
+assert(session2.getClusterTime() === undefined,
+ "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
+
+// Performing an operation with session1 should use the highest clusterTime seen by the client
+// since session1 hasn't been used yet.
+testCommandGossipedWithClusterTime(function() {
+ const coll = session1.getDatabase("test").mycoll;
+ assert.writeOK(coll.insert({}));
+}, primary.getClusterTime());
+
+assert.eq(session1.getClusterTime(), primary.getClusterTime());
+
+testCommandGossipedWithClusterTime(function() {
+ const coll = session1.getDatabase("test").mycoll;
+ assert.writeOK(coll.insert({}));
+}, session1.getClusterTime());
+
+assert(session2.getClusterTime() === undefined,
+ "session2 has yet to be used, but has clusterTime: " + tojson(session2.getClusterTime()));
+
+primary.resetClusterTime_forTesting();
+assert(primary.getClusterTime() === undefined,
+ "client's cluster time should have been reset, but has clusterTime: " +
+ tojson(primary.getClusterTime()));
+
+// Performing an operation with session2 should use the highest clusterTime seen by session2
+// since the client's clusterTime has been reset.
+session2.advanceClusterTime(session1.getClusterTime());
+testCommandGossipedWithClusterTime(function() {
+ const coll = session2.getDatabase("test").mycoll;
+ assert.writeOK(coll.insert({}));
+}, session2.getClusterTime());
+
+assert.eq(session2.getClusterTime(), primary.getClusterTime());
+
+primary.resetClusterTime_forTesting();
+assert(primary.getClusterTime() === undefined,
+ "client's cluster time should have been reset, but has clusterTime: " +
+ tojson(primary.getClusterTime()));
+
+// Performing an operation with session2 should use the highest clusterTime seen by session2
+// since the highest clusterTime seen by session1 is behind that of session2's.
+primary.advanceClusterTime(session1.getClusterTime());
+testCommandGossipedWithClusterTime(function() {
+ const coll = session2.getDatabase("test").mycoll;
+ assert.writeOK(coll.insert({}));
+}, session2.getClusterTime());
+
+rst.stopSet();
})();
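shell_gossip_cluster_time.js captures the command object the shell actually sends by temporarily replacing Mongo.prototype.runCommand, then asserts on the gossiped $clusterTime field. A minimal sketch of that spy pattern, assuming an existing connection held in a variable named conn (the name is illustrative):

// Sketch: wrap runCommand, run one command, then always restore the original.
const originalRunCommand = Mongo.prototype.runCommand;
let lastCmdObj = null;
Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
    lastCmdObj = cmdObj;
    return originalRunCommand.apply(this, arguments);
};
try {
    conn.getDB("test").runCommand({ping: 1});
} finally {
    Mongo.prototype.runCommand = originalRunCommand;
}
// Against a replica set the shell gossips $clusterTime; against a standalone it may be absent.
print("last command sent: " + tojson(lastCmdObj));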
diff --git a/jstests/noPassthrough/shell_helper_use_database.js b/jstests/noPassthrough/shell_helper_use_database.js
index 553e6df34d9..4fe5eea737a 100644
--- a/jstests/noPassthrough/shell_helper_use_database.js
+++ b/jstests/noPassthrough/shell_helper_use_database.js
@@ -7,33 +7,33 @@
var db;
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod({});
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod({});
+assert.neq(null, conn, "mongod was unable to start up");
- db = conn.getDB("db1");
- assert.eq("db1", db.getName());
+db = conn.getDB("db1");
+assert.eq("db1", db.getName());
- // Tests that shellHelper.use() updates the global 'db' object to refer to a DB object with the
- // database name specified.
- shellHelper.use("db2");
- assert.eq("db2", db.getName());
+// Tests that shellHelper.use() updates the global 'db' object to refer to a DB object with the
+// database name specified.
+shellHelper.use("db2");
+assert.eq("db2", db.getName());
- // Replace the global 'db' object with a DB object from a new session and verify that
- // shellHelper.use() still works.
- db = conn.startSession().getDatabase("db1");
- assert.eq("db1", db.getName());
+// Replace the global 'db' object with a DB object from a new session and verify that
+// shellHelper.use() still works.
+db = conn.startSession().getDatabase("db1");
+assert.eq("db1", db.getName());
- const session = db.getSession();
+const session = db.getSession();
- // Tests that shellHelper.use() updates the global 'db' object to refer to a DB object with the
- // database name specified. The DB objects should have the same underlying DriverSession object.
- shellHelper.use("db2");
- assert.eq("db2", db.getName());
+// Tests that shellHelper.use() updates the global 'db' object to refer to a DB object with the
+// database name specified. The DB objects should have the same underlying DriverSession object.
+shellHelper.use("db2");
+assert.eq("db2", db.getName());
- assert(session === db.getSession(), "session wasn't inherited as part of switching databases");
+assert(session === db.getSession(), "session wasn't inherited as part of switching databases");
- session.endSession();
- MongoRunner.stopMongod(conn);
+session.endSession();
+MongoRunner.stopMongod(conn);
})();
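shell_helper_use_database.js exercises shellHelper.use(), which rebinds the global `db` to the named database while keeping the underlying driver session. A short sketch of that behaviour, assuming the global `db` is already connected:

// Sketch: switching databases with shellHelper.use() preserves the current session.
const sessionBefore = db.getSession();
shellHelper.use("otherdb");  // equivalent to typing `use otherdb` in the shell
assert.eq("otherdb", db.getName());
assert(sessionBefore === db.getSession(), "the session should carry over to the new DB object");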
diff --git a/jstests/noPassthrough/shell_history.js b/jstests/noPassthrough/shell_history.js
index a83425553df..98adac83dc9 100644
--- a/jstests/noPassthrough/shell_history.js
+++ b/jstests/noPassthrough/shell_history.js
@@ -2,103 +2,102 @@
// appropriate permissions (where relevant).
(function() {
- "use strict";
-
- // Use dataPath because it includes the trailing "/" or "\".
- var tmpHome = MongoRunner.dataPath;
- // Ensure it exists and is a dir (eg. if running without resmoke.py and /data/db doesn't exist).
- mkdir(tmpHome);
- removeFile(tmpHome + ".dbshell");
-
- var args = [];
- var cmdline = "mongo --nodb";
- var redirection = "";
- var env = {};
- if (_isWindows()) {
- args.push("cmd.exe");
- args.push("/c");
-
- // Input is set to NUL. The output must also be redirected to NUL, otherwise running the
- // jstest manually has strange terminal IO behaviour.
- redirection = "< NUL > NUL";
-
- // USERPROFILE set to the tmp homedir.
- // Since NUL is a character device, isatty() will return true, which means that .mongorc.js
- // will be created in the HOMEDRIVE + HOMEPATH location, so we must set them also.
- if (tmpHome.match("^[a-zA-Z]:")) {
- var tmpHomeDrive = tmpHome.substr(0, 2);
- var tmpHomePath = tmpHome.substr(2);
- } else {
- var _pwd = pwd();
- assert(_pwd.match("^[a-zA-Z]:"), "pwd must include drive");
- var tmpHomeDrive = _pwd.substr(0, 2);
- var tmpHomePath = tmpHome;
- }
- env = {USERPROFILE: tmpHome, HOMEDRIVE: tmpHomeDrive, HOMEPATH: tmpHomePath};
-
+"use strict";
+
+// Use dataPath because it includes the trailing "/" or "\".
+var tmpHome = MongoRunner.dataPath;
+// Ensure it exists and is a dir (eg. if running without resmoke.py and /data/db doesn't exist).
+mkdir(tmpHome);
+removeFile(tmpHome + ".dbshell");
+
+var args = [];
+var cmdline = "mongo --nodb";
+var redirection = "";
+var env = {};
+if (_isWindows()) {
+ args.push("cmd.exe");
+ args.push("/c");
+
+ // Input is set to NUL. The output must also be redirected to NUL, otherwise running the
+ // jstest manually has strange terminal IO behaviour.
+ redirection = "< NUL > NUL";
+
+ // USERPROFILE set to the tmp homedir.
+ // Since NUL is a character device, isatty() will return true, which means that .mongorc.js
+ // will be created in the HOMEDRIVE + HOMEPATH location, so we must set them also.
+ if (tmpHome.match("^[a-zA-Z]:")) {
+ var tmpHomeDrive = tmpHome.substr(0, 2);
+ var tmpHomePath = tmpHome.substr(2);
} else {
- args.push("sh");
- args.push("-c");
+ var _pwd = pwd();
+ assert(_pwd.match("^[a-zA-Z]:"), "pwd must include drive");
+ var tmpHomeDrive = _pwd.substr(0, 2);
+ var tmpHomePath = tmpHome;
+ }
+ env = {USERPROFILE: tmpHome, HOMEDRIVE: tmpHomeDrive, HOMEPATH: tmpHomePath};
- // Use the mongo shell from the current dir, same as resmoke.py does.
- // Doesn't handle resmoke's --mongo= option.
- cmdline = "./" + cmdline;
+} else {
+ args.push("sh");
+ args.push("-c");
- // Set umask to 0 prior to running the shell.
- cmdline = "umask 0 ; " + cmdline;
+ // Use the mongo shell from the current dir, same as resmoke.py does.
+ // Doesn't handle resmoke's --mongo= option.
+ cmdline = "./" + cmdline;
- // stdin is /dev/null.
- redirection = "< /dev/null";
+ // Set umask to 0 prior to running the shell.
+ cmdline = "umask 0 ; " + cmdline;
- // HOME set to the tmp homedir.
- if (!tmpHome.startsWith("/")) {
- tmpHome = pwd() + "/" + tmpHome;
- }
- env = {HOME: tmpHome};
+ // stdin is /dev/null.
+ redirection = "< /dev/null";
+
+ // HOME set to the tmp homedir.
+ if (!tmpHome.startsWith("/")) {
+ tmpHome = pwd() + "/" + tmpHome;
}
+ env = {HOME: tmpHome};
+}
- // Add redirection to cmdline, and add cmdline to args.
- cmdline += " " + redirection;
- args.push(cmdline);
- jsTestLog("Running args:\n " + tojson(args) + "\nwith env:\n " + tojson(env));
- var pid = _startMongoProgram({args, env});
- var rc = waitProgram(pid);
+// Add redirection to cmdline, and add cmdline to args.
+cmdline += " " + redirection;
+args.push(cmdline);
+jsTestLog("Running args:\n " + tojson(args) + "\nwith env:\n " + tojson(env));
+var pid = _startMongoProgram({args, env});
+var rc = waitProgram(pid);
- assert.eq(rc, 0);
+assert.eq(rc, 0);
- var files = listFiles(tmpHome);
- jsTestLog(tojson(files));
+var files = listFiles(tmpHome);
+jsTestLog(tojson(files));
- var findFile = function(baseName) {
- for (var i = 0; i < files.length; i++) {
- if (files[i].baseName === baseName) {
- return files[i];
- }
+var findFile = function(baseName) {
+ for (var i = 0; i < files.length; i++) {
+ if (files[i].baseName === baseName) {
+ return files[i];
}
- return undefined;
- };
-
- var targetFile = ".dbshell";
- var file = findFile(targetFile);
-
- assert.neq(typeof(file), "undefined", targetFile + " should exist, but it doesn't");
- assert.eq(file.isDirectory, false, targetFile + " should not be a directory, but it is");
- assert.eq(file.size, 0, targetFile + " should be empty, but it isn't");
-
- if (!_isWindows()) {
- // On Unix, check that the file has the correct mode (permissions).
- // The shell has no way to stat a file.
- // There is no stat utility in POSIX.
- // `ls -l` is POSIX, so this is the best that we have.
- // Check for exactly "-rw-------".
- clearRawMongoProgramOutput();
- var rc = runProgram("ls", "-l", file.name);
- assert.eq(rc, 0);
-
- var output = rawMongoProgramOutput();
- var fields = output.split(" ");
- // First field is the prefix, second field is the `ls -l` permissions.
- assert.eq(fields[1].substr(0, 10), "-rw-------", targetFile + " has bad permissions");
}
+ return undefined;
+};
+
+var targetFile = ".dbshell";
+var file = findFile(targetFile);
+
+assert.neq(typeof (file), "undefined", targetFile + " should exist, but it doesn't");
+assert.eq(file.isDirectory, false, targetFile + " should not be a directory, but it is");
+assert.eq(file.size, 0, targetFile + " should be empty, but it isn't");
+
+if (!_isWindows()) {
+ // On Unix, check that the file has the correct mode (permissions).
+ // The shell has no way to stat a file.
+ // There is no stat utility in POSIX.
+ // `ls -l` is POSIX, so this is the best that we have.
+ // Check for exactly "-rw-------".
+ clearRawMongoProgramOutput();
+ var rc = runProgram("ls", "-l", file.name);
+ assert.eq(rc, 0);
+ var output = rawMongoProgramOutput();
+ var fields = output.split(" ");
+ // First field is the prefix, second field is the `ls -l` permissions.
+ assert.eq(fields[1].substr(0, 10), "-rw-------", targetFile + " has bad permissions");
+}
})();
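shell_history.js verifies that the shell creates .dbshell with owner-only permissions by parsing `ls -l`, since the shell exposes no stat() helper. A sketch of that permission check in isolation, assuming a POSIX system; the file path below is illustrative:

// Sketch: read the permission string from `ls -l` output captured by the shell.
clearRawMongoProgramOutput();
assert.eq(runProgram("ls", "-l", "/tmp/.dbshell"), 0);
const lsOutput = rawMongoProgramOutput();
// rawMongoProgramOutput() prefixes each line, so the permissions are the second field.
const perms = lsOutput.split(" ")[1].substr(0, 10);
assert.eq("-rw-------", perms, "history file should be private to the owner");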
diff --git a/jstests/noPassthrough/shell_interactive.js b/jstests/noPassthrough/shell_interactive.js
index 970e4f1d10c..ea23099a546 100644
--- a/jstests/noPassthrough/shell_interactive.js
+++ b/jstests/noPassthrough/shell_interactive.js
@@ -2,23 +2,22 @@
// and true when running in interactive mode
(function() {
- "use strict";
-
- if (!_isWindows()) {
- clearRawMongoProgramOutput();
- var rc = runProgram("./mongo", "--nodb", "--quiet", "--eval", "print(isInteractive())");
- assert.eq(rc, 0);
- var output = rawMongoProgramOutput();
- var response = (output.split('\n').slice(-2)[0]).split(' ')[1];
- assert.eq(response, "false", "Expected 'false' in script mode");
- // now try interactive
- clearRawMongoProgramOutput();
- rc = runProgram(
- "./mongo", "--nodb", "--quiet", "--shell", "--eval", "print(isInteractive()); quit()");
- assert.eq(rc, 0);
- output = rawMongoProgramOutput();
- response = (output.split('\n').slice(-2)[0]).split(' ')[1];
- assert.eq(response, "true", "Expected 'true' in interactive mode");
- }
+"use strict";
+if (!_isWindows()) {
+ clearRawMongoProgramOutput();
+ var rc = runProgram("./mongo", "--nodb", "--quiet", "--eval", "print(isInteractive())");
+ assert.eq(rc, 0);
+ var output = rawMongoProgramOutput();
+ var response = (output.split('\n').slice(-2)[0]).split(' ')[1];
+ assert.eq(response, "false", "Expected 'false' in script mode");
+ // now try interactive
+ clearRawMongoProgramOutput();
+ rc = runProgram(
+ "./mongo", "--nodb", "--quiet", "--shell", "--eval", "print(isInteractive()); quit()");
+ assert.eq(rc, 0);
+ output = rawMongoProgramOutput();
+ response = (output.split('\n').slice(-2)[0]).split(' ')[1];
+ assert.eq(response, "true", "Expected 'true' in interactive mode");
+}
})();
diff --git a/jstests/noPassthrough/shell_load_file.js b/jstests/noPassthrough/shell_load_file.js
index bdba591694c..6da5cf27baf 100644
--- a/jstests/noPassthrough/shell_load_file.js
+++ b/jstests/noPassthrough/shell_load_file.js
@@ -2,43 +2,41 @@
* Tests the exception handling behavior of the load() function across nested calls.
*/
(function() {
- "use strict";
+"use strict";
- let isMain = true;
+let isMain = true;
- if (TestData.hasOwnProperty("loadDepth")) {
- isMain = false;
- ++TestData.loadDepth;
- } else {
- TestData.loadDepth = 0;
- TestData.loadErrors = [];
- }
+if (TestData.hasOwnProperty("loadDepth")) {
+ isMain = false;
+ ++TestData.loadDepth;
+} else {
+ TestData.loadDepth = 0;
+ TestData.loadErrors = [];
+}
- if (TestData.loadDepth >= 3) {
- throw new Error("Intentionally thrown");
- }
+if (TestData.loadDepth >= 3) {
+ throw new Error("Intentionally thrown");
+}
- try {
- load("jstests/noPassthrough/shell_load_file.js");
- } catch (e) {
- TestData.loadErrors.push(e);
+try {
+ load("jstests/noPassthrough/shell_load_file.js");
+} catch (e) {
+ TestData.loadErrors.push(e);
- if (!isMain) {
- throw e;
- }
+ if (!isMain) {
+ throw e;
}
+}
- assert(isMain,
- "only the root caller of load() needs to check the generated JavaScript exceptions");
+assert(isMain, "only the root caller of load() needs to check the generated JavaScript exceptions");
- for (let i = 0; i < TestData.loadErrors.length; ++i) {
- const error = TestData.loadErrors[i];
- assert.eq("error loading js file: jstests/noPassthrough/shell_load_file.js", error.message);
- assert(
- /@jstests\/noPassthrough\/shell_load_file.js:/.test(error.stack) ||
- /@jstests\\noPassthrough\\shell_load_file.js:/.test(error.stack),
- () =>
- "JavaScript stacktrace from load() didn't include file paths (AKA stack frames): " +
- error.stack);
- }
+for (let i = 0; i < TestData.loadErrors.length; ++i) {
+ const error = TestData.loadErrors[i];
+ assert.eq("error loading js file: jstests/noPassthrough/shell_load_file.js", error.message);
+ assert(
+ /@jstests\/noPassthrough\/shell_load_file.js:/.test(error.stack) ||
+ /@jstests\\noPassthrough\\shell_load_file.js:/.test(error.stack),
+ () => "JavaScript stacktrace from load() didn't include file paths (AKA stack frames): " +
+ error.stack);
+}
})();
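shell_load_file.js loads itself recursively and checks that exceptions thrown inside load() surface to the caller with file paths in the stack. A minimal sketch of handling a failing load(), assuming a script path that throws (the path below is hypothetical):

// Sketch: load() rethrows errors from the loaded script; the stack includes the file path.
try {
    load("jstests/noPassthrough/some_failing_script.js");  // hypothetical nested script
} catch (e) {
    print("load() failed: " + e.message);
    print("stack: " + e.stack);
}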
diff --git a/jstests/noPassthrough/shell_mongobridge_port_allocation.js b/jstests/noPassthrough/shell_mongobridge_port_allocation.js
index a61eda2fc87..964080682db 100644
--- a/jstests/noPassthrough/shell_mongobridge_port_allocation.js
+++ b/jstests/noPassthrough/shell_mongobridge_port_allocation.js
@@ -6,74 +6,73 @@
* @tags: [requires_replication, requires_sharding]
*/
(function() {
- "use strict";
+"use strict";
- function checkBridgeOffset(node, processType) {
- const bridgePort = node.port;
- const serverPort =
- assert.commandWorked(node.adminCommand({getCmdLineOpts: 1})).parsed.net.port;
- assert.neq(bridgePort,
- serverPort,
- node + " is a connection to " + processType + " rather than to mongobridge");
- assert.eq(bridgePort + MongoBridge.kBridgeOffset,
- serverPort,
- "corresponding mongobridge and " + processType +
- " ports should be staggered by a multiple of 10");
- }
+function checkBridgeOffset(node, processType) {
+ const bridgePort = node.port;
+ const serverPort = assert.commandWorked(node.adminCommand({getCmdLineOpts: 1})).parsed.net.port;
+ assert.neq(bridgePort,
+ serverPort,
+ node + " is a connection to " + processType + " rather than to mongobridge");
+ assert.eq(bridgePort + MongoBridge.kBridgeOffset,
+ serverPort,
+ "corresponding mongobridge and " + processType +
+ " ports should be staggered by a multiple of 10");
+}
- // We use >5 nodes to ensure that allocating twice as many ports doesn't interfere with having
- // the corresponding mongobridge and mongod ports staggered by a multiple of 10.
- const rst = new ReplSetTest({nodes: 7, useBridge: true});
- rst.startSet();
+// We use >5 nodes to ensure that allocating twice as many ports doesn't interfere with having
+// the corresponding mongobridge and mongod ports staggered by a multiple of 10.
+const rst = new ReplSetTest({nodes: 7, useBridge: true});
+rst.startSet();
- // Rig the election so that the primary remains stable throughout this test despite the replica
- // set having a larger number of members.
- const replSetConfig = rst.getReplSetConfig();
- for (let i = 1; i < rst.nodes.length; ++i) {
- replSetConfig.members[i].priority = 0;
- replSetConfig.members[i].votes = 0;
- }
- rst.initiate(replSetConfig);
+// Rig the election so that the primary remains stable throughout this test despite the replica
+// set having a larger number of members.
+const replSetConfig = rst.getReplSetConfig();
+for (let i = 1; i < rst.nodes.length; ++i) {
+ replSetConfig.members[i].priority = 0;
+ replSetConfig.members[i].votes = 0;
+}
+rst.initiate(replSetConfig);
- for (let node of rst.nodes) {
- checkBridgeOffset(node, "mongod");
- }
+for (let node of rst.nodes) {
+ checkBridgeOffset(node, "mongod");
+}
- rst.stopSet();
+rst.stopSet();
- // We run ShardingTest under mongobridge with both 1-node replica set shards and stand-alone
- // mongod shards.
- for (let options of[{rs: {nodes: 1}}, {rs: false, shardAsReplicaSet: false}]) {
- resetAllocatedPorts();
+// We run ShardingTest under mongobridge with both 1-node replica set shards and stand-alone
+// mongod shards.
+for (let options of [{rs: {nodes: 1}}, {rs: false, shardAsReplicaSet: false}]) {
+ resetAllocatedPorts();
- const numMongos = 5;
- const numShards = 5;
- const st = new ShardingTest(Object.assign({
- mongos: numMongos,
- shards: numShards,
- config: {nodes: 1},
- useBridge: true,
- },
- options));
+ const numMongos = 5;
+ const numShards = 5;
+ const st = new ShardingTest(Object.assign({
+ mongos: numMongos,
+ shards: numShards,
+ config: {nodes: 1},
+ useBridge: true,
+ },
+ options));
- for (let i = 0; i < numMongos; ++i) {
- checkBridgeOffset(st["s" + i], "mongos");
- }
+ for (let i = 0; i < numMongos; ++i) {
+ checkBridgeOffset(st["s" + i], "mongos");
+ }
- for (let configServer of st.configRS.nodes) {
- checkBridgeOffset(configServer, "config server");
- }
+ for (let configServer of st.configRS.nodes) {
+ checkBridgeOffset(configServer, "config server");
+ }
- for (let i = 0; i < numShards; ++i) {
- if (options.rs) {
- for (let node of st["rs" + i].nodes) {
- checkBridgeOffset(node, "shard");
- }
- } else {
- checkBridgeOffset(st["d" + i], "shard");
+ for (let i = 0; i < numShards; ++i) {
+ if (options.rs) {
+ for (let node of st["rs" + i].nodes) {
+ checkBridgeOffset(node, "shard");
}
+ } else {
+ checkBridgeOffset(st["d" + i], "shard");
}
-
- st.stop();
}
+
+ st.stop();
+}
})();
diff --git a/jstests/noPassthrough/shell_quit.js b/jstests/noPassthrough/shell_quit.js
index 7ff45368f00..17721119846 100644
--- a/jstests/noPassthrough/shell_quit.js
+++ b/jstests/noPassthrough/shell_quit.js
@@ -1,17 +1,17 @@
(function() {
- 'use strict';
- var checkShell = function(retCode) {
- var args = [
- "mongo",
- "--nodb",
- "--eval",
- "quit(" + retCode + ");",
- ];
+'use strict';
+var checkShell = function(retCode) {
+ var args = [
+ "mongo",
+ "--nodb",
+ "--eval",
+ "quit(" + retCode + ");",
+ ];
- var actualRetCode = _runMongoProgram.apply(null, args);
- assert.eq(retCode, actualRetCode);
- };
+ var actualRetCode = _runMongoProgram.apply(null, args);
+ assert.eq(retCode, actualRetCode);
+};
- checkShell(0);
- checkShell(5);
+checkShell(0);
+checkShell(5);
})();
diff --git a/jstests/noPassthrough/shell_retry_writes_on_retryable_errors.js b/jstests/noPassthrough/shell_retry_writes_on_retryable_errors.js
index ca052915fe7..0010d54bff2 100644
--- a/jstests/noPassthrough/shell_retry_writes_on_retryable_errors.js
+++ b/jstests/noPassthrough/shell_retry_writes_on_retryable_errors.js
@@ -4,129 +4,127 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const dbName = "test";
- const collName = jsTest.name();
-
- const rsConn = new Mongo(rst.getURL());
- const db = rsConn.startSession({retryWrites: true}).getDatabase(dbName);
-
- // We configure the mongo shell to log its retry attempts so there are more diagnostics
- // available in case this test ever fails.
- TestData.logRetryAttempts = true;
-
- /**
- * The testCommandIsRetried() function serves as the fixture for writing test cases which run
- * commands against the server and assert that the mongo shell retries them correctly.
- *
- * The 'testFn' parameter is a function that performs an arbitrary number of operations against
- * the database. The command requests that the mongo shell attempts to send to the server
- * (including any command requests which are retried) are then specified as the sole argument to
- * the 'assertFn' parameter.
- *
- * The testFn(enableCapture, disableCapture) function can also selectively turn on and off the
- * capturing of command requests by calling the functions it receives for its first and second
- * parameters, respectively.
- */
- function testCommandIsRetried(testFn, assertFn) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const cmdObjsSeen = [];
-
- let shouldCaptureCmdObjs = true;
-
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- if (shouldCaptureCmdObjs) {
- cmdObjsSeen.push(cmdObj);
- }
-
- return mongoRunCommandOriginal.apply(this, arguments);
- };
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
- try {
- assert.doesNotThrow(() => testFn(
- () => {
- shouldCaptureCmdObjs = true;
- },
- () => {
- shouldCaptureCmdObjs = false;
- }));
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
+const dbName = "test";
+const collName = jsTest.name();
+
+const rsConn = new Mongo(rst.getURL());
+const db = rsConn.startSession({retryWrites: true}).getDatabase(dbName);
+
+// We configure the mongo shell to log its retry attempts so there are more diagnostics
+// available in case this test ever fails.
+TestData.logRetryAttempts = true;
+
+/**
+ * The testCommandIsRetried() function serves as the fixture for writing test cases which run
+ * commands against the server and assert that the mongo shell retries them correctly.
+ *
+ * The 'testFn' parameter is a function that performs an arbitrary number of operations against
+ * the database. The command requests that the mongo shell attempts to send to the server
+ * (including any command requests which are retried) are then specified as the sole argument to
+ * the 'assertFn' parameter.
+ *
+ * The testFn(enableCapture, disableCapture) function can also selectively turn on and off the
+ * capturing of command requests by calling the functions it receives for its first and second
+ * parameters, respectively.
+ */
+function testCommandIsRetried(testFn, assertFn) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+ const cmdObjsSeen = [];
+
+ let shouldCaptureCmdObjs = true;
- if (cmdObjsSeen.length === 0) {
- throw new Error("Mongo.prototype.runCommand() was never called: " + testFn.toString());
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ if (shouldCaptureCmdObjs) {
+ cmdObjsSeen.push(cmdObj);
}
- assertFn(cmdObjsSeen);
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
+
+ try {
+ assert.doesNotThrow(() => testFn(
+ () => {
+ shouldCaptureCmdObjs = true;
+ },
+ () => {
+ shouldCaptureCmdObjs = false;
+ }));
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
}
- testCommandIsRetried(
- function testInsertRetriedOnWriteConcernError(enableCapture, disableCapture) {
- disableCapture();
- const secondary = rst.getSecondary();
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
-
- try {
- enableCapture();
- const res = db[collName].insert({}, {writeConcern: {w: 2, wtimeout: 1000}});
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
- disableCapture();
- } finally {
- // We disable the failpoint in a finally block to prevent a misleading fassert()
- // message from being logged by the secondary when it is shut down with the
- // failpoint enabled.
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
- }
- },
- function assertInsertRetriedExactlyOnce(cmdObjsSeen) {
- assert.eq(2, cmdObjsSeen.length, () => tojson(cmdObjsSeen));
- assert(cmdObjsSeen.every(cmdObj => Object.keys(cmdObj)[0] === "insert"),
- () => "expected both attempts to be insert requests: " + tojson(cmdObjsSeen));
- assert.eq(
- cmdObjsSeen[0], cmdObjsSeen[1], "command request changed between retry attempts");
- });
+ if (cmdObjsSeen.length === 0) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + testFn.toString());
+ }
- testCommandIsRetried(
- function testUpdateRetriedOnRetryableCommandError(enableCapture, disableCapture) {
- disableCapture();
+ assertFn(cmdObjsSeen);
+}
- const primary = rst.getPrimary();
- primary.adminCommand({
- configureFailPoint: "onPrimaryTransactionalWrite",
- data: {
- closeConnection: false,
- failBeforeCommitExceptionCode: ErrorCodes.InterruptedDueToReplStateChange
- },
- mode: {times: 1}
- });
+testCommandIsRetried(
+ function testInsertRetriedOnWriteConcernError(enableCapture, disableCapture) {
+ disableCapture();
+ const secondary = rst.getSecondary();
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
+ try {
enableCapture();
- const res = db[collName].update({}, {$set: {a: 1}});
- assert.commandWorked(res);
+ const res = db[collName].insert({}, {writeConcern: {w: 2, wtimeout: 1000}});
+ assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
disableCapture();
-
- primary.adminCommand({configureFailPoint: "onPrimaryTransactionalWrite", mode: "off"});
- },
- function assertUpdateRetriedExactlyOnce(cmdObjsSeen) {
- assert.eq(2, cmdObjsSeen.length, () => tojson(cmdObjsSeen));
- assert(cmdObjsSeen.every(cmdObj => Object.keys(cmdObj)[0] === "update"),
- () => "expected both attempts to be update requests: " + tojson(cmdObjsSeen));
- assert.eq(
- cmdObjsSeen[0], cmdObjsSeen[1], "command request changed between retry attempts");
+ } finally {
+ // We disable the failpoint in a finally block to prevent a misleading fassert()
+ // message from being logged by the secondary when it is shut down with the
+ // failpoint enabled.
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
+ }
+ },
+ function assertInsertRetriedExactlyOnce(cmdObjsSeen) {
+ assert.eq(2, cmdObjsSeen.length, () => tojson(cmdObjsSeen));
+ assert(cmdObjsSeen.every(cmdObj => Object.keys(cmdObj)[0] === "insert"),
+ () => "expected both attempts to be insert requests: " + tojson(cmdObjsSeen));
+ assert.eq(cmdObjsSeen[0], cmdObjsSeen[1], "command request changed between retry attempts");
+ });
+
+testCommandIsRetried(
+ function testUpdateRetriedOnRetryableCommandError(enableCapture, disableCapture) {
+ disableCapture();
+
+ const primary = rst.getPrimary();
+ primary.adminCommand({
+ configureFailPoint: "onPrimaryTransactionalWrite",
+ data: {
+ closeConnection: false,
+ failBeforeCommitExceptionCode: ErrorCodes.InterruptedDueToReplStateChange
+ },
+ mode: {times: 1}
});
- rst.stopSet();
+ enableCapture();
+ const res = db[collName].update({}, {$set: {a: 1}});
+ assert.commandWorked(res);
+ disableCapture();
+
+ primary.adminCommand({configureFailPoint: "onPrimaryTransactionalWrite", mode: "off"});
+ },
+ function assertUpdateRetriedExactlyOnce(cmdObjsSeen) {
+ assert.eq(2, cmdObjsSeen.length, () => tojson(cmdObjsSeen));
+ assert(cmdObjsSeen.every(cmdObj => Object.keys(cmdObj)[0] === "update"),
+ () => "expected both attempts to be update requests: " + tojson(cmdObjsSeen));
+ assert.eq(cmdObjsSeen[0], cmdObjsSeen[1], "command request changed between retry attempts");
+ });
+
+rst.stopSet();
})();
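shell_retry_writes_on_retryable_errors.js forces retries by enabling server failpoints (rsSyncApplyStop to produce a write concern timeout, onPrimaryTransactionalWrite to inject a retryable command error) and then counts the captured command objects. The basic failpoint on/off pattern it relies on, sketched in isolation against a secondary obtained from the same rst fixture:

// Sketch: pause secondary oplog application, then re-enable it (failpoint name from the test above).
const secondary = rst.getSecondary();
assert.commandWorked(
    secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
// ... a {w: 2, wtimeout: 1000} write now fails with WriteConcernFailed and is retried once ...
assert.commandWorked(
    secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));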
diff --git a/jstests/noPassthrough/shell_retry_writes_uri.js b/jstests/noPassthrough/shell_retry_writes_uri.js
index a83fa33eb46..bb591438280 100644
--- a/jstests/noPassthrough/shell_retry_writes_uri.js
+++ b/jstests/noPassthrough/shell_retry_writes_uri.js
@@ -1,148 +1,142 @@
// @tags: [requires_replication]
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- let rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- let mongoUri = "mongodb://" + rst.nodes.map((node) => node.host).join(",") + "/test";
- let conn = rst.nodes[0];
-
- // There are three ways to enable retryable writes in the mongo shell.
- // 1. (cmdline flag) start mongo shell with --retryWrites
- // 2. (uri param) connect to a uri like mongodb://.../test?retryWrites=true
- // 3. (session option) in mongo shell create a new session with {retryWrites: true}
-
- function runShellScript(uri, cmdArgs, insertShouldHaveTxnNumber, shellFn) {
- // This function is stringified and called immediately in the mongo --eval.
- function testWrapper(insertShouldHaveTxnNumber, shellFn) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- let insertFound = false;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- let cmdObjSeen = cmdObj;
- let cmdName = Object.keys(cmdObjSeen)[0];
-
- if (cmdName === "query" || cmdName === "$query") {
- cmdObjSeen = cmdObjSeen[cmdName];
- cmdName = Object.keys(cmdObj)[0];
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+let mongoUri = "mongodb://" + rst.nodes.map((node) => node.host).join(",") + "/test";
+let conn = rst.nodes[0];
+
+// There are three ways to enable retryable writes in the mongo shell.
+// 1. (cmdline flag) start mongo shell with --retryWrites
+// 2. (uri param) connect to a uri like mongodb://.../test?retryWrites=true
+// 3. (session option) in mongo shell create a new session with {retryWrites: true}
+
+function runShellScript(uri, cmdArgs, insertShouldHaveTxnNumber, shellFn) {
+ // This function is stringified and called immediately in the mongo --eval.
+ function testWrapper(insertShouldHaveTxnNumber, shellFn) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+ let insertFound = false;
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ let cmdObjSeen = cmdObj;
+ let cmdName = Object.keys(cmdObjSeen)[0];
+
+ if (cmdName === "query" || cmdName === "$query") {
+ cmdObjSeen = cmdObjSeen[cmdName];
+ cmdName = Object.keys(cmdObj)[0];
+ }
+
+ if (cmdName === "insert") {
+ insertFound = true;
+ if (insertShouldHaveTxnNumber) {
+ assert(cmdObjSeen.hasOwnProperty("txnNumber"),
+ "insert sent without expected txnNumber");
+ } else {
+ assert(!cmdObjSeen.hasOwnProperty("txnNumber"),
+ "insert sent with txnNumber unexpectedly");
}
+ }
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- if (cmdName === "insert") {
- insertFound = true;
- if (insertShouldHaveTxnNumber) {
- assert(cmdObjSeen.hasOwnProperty("txnNumber"),
- "insert sent without expected txnNumber");
- } else {
- assert(!cmdObjSeen.hasOwnProperty("txnNumber"),
- "insert sent with txnNumber unexpectedly");
- }
- }
- return mongoRunCommandOriginal.apply(this, arguments);
- };
-
- shellFn();
- assert(insertFound, "test did not run insert command");
- }
-
- // Construct the string to be passed to eval.
- let script = "(" + testWrapper.toString() + ")(";
- script += insertShouldHaveTxnNumber + ",";
- script += shellFn.toString();
- script += ")";
-
- let args = ["./mongo", uri, "--eval", script].concat(cmdArgs);
- let exitCode = runMongoProgram(...args);
- assert.eq(exitCode, 0, `shell script "${shellFn.name}" exited with ${exitCode}`);
+ shellFn();
+ assert(insertFound, "test did not run insert command");
}
- // Tests --retryWrites command line parameter.
- runShellScript(mongoUri, ["--retryWrites"], true, function flagWorks() {
- assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
- assert.writeOK(db.coll.insert({}), "cannot insert");
- });
-
- // The uri param should override --retryWrites.
- runShellScript(
- mongoUri + "?retryWrites=false", ["--retryWrites"], false, function flagOverridenByUri() {
- assert(!db.getSession().getOptions().shouldRetryWrites(),
- "retryWrites should be false");
- assert.writeOK(db.coll.insert({}), "cannot insert");
- });
-
- // Even if initial connection has retryWrites=false in uri, new connections should not be
- // overridden.
- runShellScript(mongoUri + "?retryWrites=false",
- ["--retryWrites"],
- true,
- function flagNotOverridenByNewConn() {
- let connUri = db.getMongo().host; // does not have ?retryWrites=false.
- let sess = new Mongo(connUri).startSession();
- assert(sess.getOptions().shouldRetryWrites(), "retryWrites should be true");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
- });
-
- // Unless that uri also specifies retryWrites.
- runShellScript(mongoUri + "?retryWrites=false",
- ["--retryWrites"],
- false,
- function flagOverridenInNewConn() {
- let connUri = "mongodb://" + db.getMongo().host + "/test?retryWrites=false";
- let sess = new Mongo(connUri).startSession();
- assert(!sess.getOptions().shouldRetryWrites(),
- "retryWrites should be false");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
- });
-
- // Session options should override --retryWrites as well.
- runShellScript(mongoUri, ["--retryWrites"], false, function flagOverridenByOpts() {
- let connUri = "mongodb://" + db.getMongo().host + "/test";
- let sess = new Mongo(connUri).startSession({retryWrites: false});
- assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
- });
-
- // Test uri retryWrites parameter.
- runShellScript(mongoUri + "?retryWrites=true", [], true, function uriTrueWorks() {
- assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
- assert.writeOK(db.coll.insert({}), "cannot insert");
- });
-
- // Test that uri retryWrites=false works.
- runShellScript(mongoUri + "?retryWrites=false", [], false, function uriFalseWorks() {
+ // Construct the string to be passed to eval.
+ let script = "(" + testWrapper.toString() + ")(";
+ script += insertShouldHaveTxnNumber + ",";
+ script += shellFn.toString();
+ script += ")";
+
+ let args = ["./mongo", uri, "--eval", script].concat(cmdArgs);
+ let exitCode = runMongoProgram(...args);
+ assert.eq(exitCode, 0, `shell script "${shellFn.name}" exited with ${exitCode}`);
+}
+
+// Tests --retryWrites command line parameter.
+runShellScript(mongoUri, ["--retryWrites"], true, function flagWorks() {
+ assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
+ assert.writeOK(db.coll.insert({}), "cannot insert");
+});
+
+// The uri param should override --retryWrites.
+runShellScript(
+ mongoUri + "?retryWrites=false", ["--retryWrites"], false, function flagOverridenByUri() {
assert(!db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be false");
assert.writeOK(db.coll.insert({}), "cannot insert");
});
- // Test SessionOptions retryWrites option.
- runShellScript(mongoUri, [], true, function sessOptTrueWorks() {
- let connUri = "mongodb://" + db.getMongo().host + "/test";
- let sess = new Mongo(connUri).startSession({retryWrites: true});
+// Even if initial connection has retryWrites=false in uri, new connections should not be
+// overridden.
+runShellScript(
+ mongoUri + "?retryWrites=false", ["--retryWrites"], true, function flagNotOverridenByNewConn() {
+ let connUri = db.getMongo().host; // does not have ?retryWrites=false.
+ let sess = new Mongo(connUri).startSession();
assert(sess.getOptions().shouldRetryWrites(), "retryWrites should be true");
assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
});
- // Test that SessionOptions retryWrites:false works.
- runShellScript(mongoUri, [], false, function sessOptFalseWorks() {
- let connUri = "mongodb://" + db.getMongo().host + "/test";
- let sess = new Mongo(connUri).startSession({retryWrites: false});
- assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
- assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
- });
-
- // Test that session option overrides uri option.
- runShellScript(mongoUri + "?retryWrites=true", [], false, function sessOptOverridesUri() {
- let sess = db.getMongo().startSession({retryWrites: false});
+// Unless that uri also specifies retryWrites.
+runShellScript(
+ mongoUri + "?retryWrites=false", ["--retryWrites"], false, function flagOverridenInNewConn() {
+ let connUri = "mongodb://" + db.getMongo().host + "/test?retryWrites=false";
+ let sess = new Mongo(connUri).startSession();
assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
});
- rst.stopSet();
+// Session options should override --retryWrites as well.
+runShellScript(mongoUri, ["--retryWrites"], false, function flagOverridenByOpts() {
+ let connUri = "mongodb://" + db.getMongo().host + "/test";
+ let sess = new Mongo(connUri).startSession({retryWrites: false});
+ assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
+ assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+});
+
+// Test uri retryWrites parameter.
+runShellScript(mongoUri + "?retryWrites=true", [], true, function uriTrueWorks() {
+ assert(db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be true");
+ assert.writeOK(db.coll.insert({}), "cannot insert");
+});
+
+// Test that uri retryWrites=false works.
+runShellScript(mongoUri + "?retryWrites=false", [], false, function uriFalseWorks() {
+ assert(!db.getSession().getOptions().shouldRetryWrites(), "retryWrites should be false");
+ assert.writeOK(db.coll.insert({}), "cannot insert");
+});
+
+// Test SessionOptions retryWrites option.
+runShellScript(mongoUri, [], true, function sessOptTrueWorks() {
+ let connUri = "mongodb://" + db.getMongo().host + "/test";
+ let sess = new Mongo(connUri).startSession({retryWrites: true});
+ assert(sess.getOptions().shouldRetryWrites(), "retryWrites should be true");
+ assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+});
+
+// Test that SessionOptions retryWrites:false works.
+runShellScript(mongoUri, [], false, function sessOptFalseWorks() {
+ let connUri = "mongodb://" + db.getMongo().host + "/test";
+ let sess = new Mongo(connUri).startSession({retryWrites: false});
+ assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
+ assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+});
+
+// Test that session option overrides uri option.
+runShellScript(mongoUri + "?retryWrites=true", [], false, function sessOptOverridesUri() {
+ let sess = db.getMongo().startSession({retryWrites: false});
+ assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false");
+ assert.writeOK(sess.getDatabase("test").coll.insert({}), "cannot insert");
+});
+
+rst.stopSet();
}());
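shell_retry_writes_uri.js checks the precedence of the three ways to enable retryable writes: the --retryWrites command line flag, the retryWrites URI parameter, and the {retryWrites: ...} session option, with the session option taking priority. A short sketch of the strongest override, assuming a connected shell with a global `db`:

// Sketch: an explicit SessionOptions value overrides both the URI parameter and --retryWrites.
const sess = db.getMongo().startSession({retryWrites: false});
assert(!sess.getOptions().shouldRetryWrites(), "retryWrites should be false for this session");
sess.endSession();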
diff --git a/jstests/noPassthrough/shell_session_option_defaults.js b/jstests/noPassthrough/shell_session_option_defaults.js
index 16c4e3860d6..b865914a99b 100644
--- a/jstests/noPassthrough/shell_session_option_defaults.js
+++ b/jstests/noPassthrough/shell_session_option_defaults.js
@@ -2,80 +2,80 @@
* Tests the default values for causal consistency and retryable writes as part of SessionOptions.
*/
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
+const conn = MongoRunner.runMongod();
- let session = conn.startSession();
- assert(session.getOptions().isCausalConsistency(),
- "Causal consistency should be implicitly enabled for an explicit session");
- assert(!session.getOptions().shouldRetryWrites(),
- "Retryable writes should not be implicitly enabled for an explicit session");
- session.endSession();
+let session = conn.startSession();
+assert(session.getOptions().isCausalConsistency(),
+ "Causal consistency should be implicitly enabled for an explicit session");
+assert(!session.getOptions().shouldRetryWrites(),
+ "Retryable writes should not be implicitly enabled for an explicit session");
+session.endSession();
- session = conn.startSession({causalConsistency: true});
- assert(session.getOptions().isCausalConsistency(),
- "Causal consistency should be able to be explicitly enabled");
- assert(!session.getOptions().shouldRetryWrites(),
- "Retryable writes should not be implicitly enabled for an explicit session");
- session.endSession();
+session = conn.startSession({causalConsistency: true});
+assert(session.getOptions().isCausalConsistency(),
+ "Causal consistency should be able to be explicitly enabled");
+assert(!session.getOptions().shouldRetryWrites(),
+ "Retryable writes should not be implicitly enabled for an explicit session");
+session.endSession();
- session = conn.startSession({causalConsistency: false});
- assert(!session.getOptions().isCausalConsistency(),
- "Causal consistency should be able to be explicitly disabled");
- assert(!session.getOptions().shouldRetryWrites(),
- "Retryable writes should not be implicitly enabled for an explicit session");
- session.endSession();
+session = conn.startSession({causalConsistency: false});
+assert(!session.getOptions().isCausalConsistency(),
+ "Causal consistency should be able to be explicitly disabled");
+assert(!session.getOptions().shouldRetryWrites(),
+ "Retryable writes should not be implicitly enabled for an explicit session");
+session.endSession();
+
+session = conn.startSession({retryWrites: false});
+assert(session.getOptions().isCausalConsistency(),
+ "Causal consistency should be implicitly enabled for an explicit session");
+assert(!session.getOptions().shouldRetryWrites(),
+ "Retryable writes should be able to be explicitly disabled");
+session.endSession();
+
+session = conn.startSession({retryWrites: true});
+assert(session.getOptions().isCausalConsistency(),
+ "Causal consistency should be implicitly enabled for an explicit session");
+assert(session.getOptions().shouldRetryWrites(),
+ "Retryable writes should be able to be explicitly enabled");
+session.endSession();
+
+function runMongoShellWithRetryWritesEnabled(func) {
+ const args = [MongoRunner.mongoShellPath];
+ args.push("--port", conn.port);
+ args.push("--retryWrites");
- session = conn.startSession({retryWrites: false});
+ const jsCode = "(" + func.toString() + ")()";
+ args.push("--eval", jsCode);
+
+ const exitCode = runMongoProgram.apply(null, args);
+ assert.eq(0, exitCode, "Encountered an error in the other mongo shell");
+}
+
+runMongoShellWithRetryWritesEnabled(function() {
+ let session = db.getSession();
+ assert(session.getOptions().isCausalConsistency(),
+ "Causal consistency should be implicitly enabled for an explicit session");
+ assert(session.getOptions().shouldRetryWrites(),
+ "Retryable writes should be implicitly enabled on default session when using" +
+ " --retryWrites");
+
+ session = db.getMongo().startSession({retryWrites: false});
assert(session.getOptions().isCausalConsistency(),
"Causal consistency should be implicitly enabled for an explicit session");
assert(!session.getOptions().shouldRetryWrites(),
"Retryable writes should be able to be explicitly disabled");
session.endSession();
- session = conn.startSession({retryWrites: true});
+ session = db.getMongo().startSession();
assert(session.getOptions().isCausalConsistency(),
"Causal consistency should be implicitly enabled for an explicit session");
assert(session.getOptions().shouldRetryWrites(),
- "Retryable writes should be able to be explicitly enabled");
+ "Retryable writes should be implicitly enabled on new sessions when using" +
+ " --retryWrites");
session.endSession();
+});
- function runMongoShellWithRetryWritesEnabled(func) {
- const args = [MongoRunner.mongoShellPath];
- args.push("--port", conn.port);
- args.push("--retryWrites");
-
- const jsCode = "(" + func.toString() + ")()";
- args.push("--eval", jsCode);
-
- const exitCode = runMongoProgram.apply(null, args);
- assert.eq(0, exitCode, "Encountered an error in the other mongo shell");
- }
-
- runMongoShellWithRetryWritesEnabled(function() {
- let session = db.getSession();
- assert(session.getOptions().isCausalConsistency(),
- "Causal consistency should be implicitly enabled for an explicit session");
- assert(session.getOptions().shouldRetryWrites(),
- "Retryable writes should be implicitly enabled on default session when using" +
- " --retryWrites");
-
- session = db.getMongo().startSession({retryWrites: false});
- assert(session.getOptions().isCausalConsistency(),
- "Causal consistency should be implicitly enabled for an explicit session");
- assert(!session.getOptions().shouldRetryWrites(),
- "Retryable writes should be able to be explicitly disabled");
- session.endSession();
-
- session = db.getMongo().startSession();
- assert(session.getOptions().isCausalConsistency(),
- "Causal consistency should be implicitly enabled for an explicit session");
- assert(session.getOptions().shouldRetryWrites(),
- "Retryable writes should be implicitly enabled on new sessions when using" +
- " --retryWrites");
- session.endSession();
- });
-
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
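shell_session_option_defaults.js pins down the defaults for explicit sessions: causal consistency is on unless disabled, and retryable writes are off unless enabled (or unless the shell was started with --retryWrites). A compact sketch of those defaults, assuming a connection held in `conn` as in the test:

// Sketch: default SessionOptions for an explicitly started session.
const s = conn.startSession();
assert(s.getOptions().isCausalConsistency());   // on by default
assert(!s.getOptions().shouldRetryWrites());    // off by default
s.endSession();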
diff --git a/jstests/noPassthrough/shutdown_while_fsync_locked.js b/jstests/noPassthrough/shutdown_while_fsync_locked.js
index 5d611741ce2..d234fa444eb 100644
--- a/jstests/noPassthrough/shutdown_while_fsync_locked.js
+++ b/jstests/noPassthrough/shutdown_while_fsync_locked.js
@@ -2,14 +2,14 @@
* Ensure that we allow mongod to shutdown cleanly while being fsync locked.
*/
(function() {
- "use strict";
+"use strict";
- let conn = MongoRunner.runMongod();
- let db = conn.getDB("test");
+let conn = MongoRunner.runMongod();
+let db = conn.getDB("test");
- for (let i = 0; i < 10; i++) {
- assert.commandWorked(db.adminCommand({fsync: 1, lock: 1}));
- }
+for (let i = 0; i < 10; i++) {
+ assert.commandWorked(db.adminCommand({fsync: 1, lock: 1}));
+}
- MongoRunner.stopMongod(conn, MongoRunner.EXIT_CLEAN, {skipValidation: true});
+MongoRunner.stopMongod(conn, MongoRunner.EXIT_CLEAN, {skipValidation: true});
}());
diff --git a/jstests/noPassthrough/skip_sharding_configuration_checks.js b/jstests/noPassthrough/skip_sharding_configuration_checks.js
index eb067f94a52..8573a223979 100644
--- a/jstests/noPassthrough/skip_sharding_configuration_checks.js
+++ b/jstests/noPassthrough/skip_sharding_configuration_checks.js
@@ -4,51 +4,50 @@
* requires_majority_read_concern]
*/
(function() {
- 'use strict';
-
- function expectState(rst, state) {
- assert.soon(function() {
- var status = rst.status();
- if (status.myState != state) {
- print("Waiting for state " + state + " in replSetGetStatus output: " +
- tojson(status));
- }
- return status.myState == state;
- });
- }
-
- let configSvr = MongoRunner.runMongod(
- {configsvr: "", setParameter: 'skipShardingConfigurationChecks=true'});
- assert.eq(configSvr, null);
-
- let shardSvr =
- MongoRunner.runMongod({shardsvr: "", setParameter: 'skipShardingConfigurationChecks=true'});
- assert.eq(shardSvr, null);
-
- var st = new ShardingTest({name: "skipConfig", shards: {rs0: {nodes: 1}}});
- var configRS = st.configRS;
- var shardRS = st.rs0;
-
- shardRS.stopSet(15, true);
- configRS.stopSet(undefined, true);
-
- jsTestLog("Restarting configRS as a standalone ReplicaSet");
-
- for (let i = 0; i < configRS.nodes.length; i++) {
- delete configRS.nodes[i].fullOptions.configsvr;
- configRS.nodes[i].fullOptions.setParameter = 'skipShardingConfigurationChecks=true';
- }
- configRS.startSet({}, true);
- expectState(configRS, ReplSetTest.State.PRIMARY);
- configRS.stopSet();
-
- jsTestLog("Restarting shardRS as a standalone ReplicaSet");
- for (let i = 0; i < shardRS.nodes.length; i++) {
- delete shardRS.nodes[i].fullOptions.shardsvr;
- shardRS.nodes[i].fullOptions.setParameter = 'skipShardingConfigurationChecks=true';
- }
- shardRS.startSet({}, true);
- expectState(shardRS, ReplSetTest.State.PRIMARY);
- shardRS.stopSet();
- MongoRunner.stopMongos(st.s);
+'use strict';
+
+function expectState(rst, state) {
+ assert.soon(function() {
+ var status = rst.status();
+ if (status.myState != state) {
+ print("Waiting for state " + state + " in replSetGetStatus output: " + tojson(status));
+ }
+ return status.myState == state;
+ });
+}
+
+let configSvr =
+ MongoRunner.runMongod({configsvr: "", setParameter: 'skipShardingConfigurationChecks=true'});
+assert.eq(configSvr, null);
+
+let shardSvr =
+ MongoRunner.runMongod({shardsvr: "", setParameter: 'skipShardingConfigurationChecks=true'});
+assert.eq(shardSvr, null);
+
+var st = new ShardingTest({name: "skipConfig", shards: {rs0: {nodes: 1}}});
+var configRS = st.configRS;
+var shardRS = st.rs0;
+
+shardRS.stopSet(15, true);
+configRS.stopSet(undefined, true);
+
+jsTestLog("Restarting configRS as a standalone ReplicaSet");
+
+for (let i = 0; i < configRS.nodes.length; i++) {
+ delete configRS.nodes[i].fullOptions.configsvr;
+ configRS.nodes[i].fullOptions.setParameter = 'skipShardingConfigurationChecks=true';
+}
+configRS.startSet({}, true);
+expectState(configRS, ReplSetTest.State.PRIMARY);
+configRS.stopSet();
+
+jsTestLog("Restarting shardRS as a standalone ReplicaSet");
+for (let i = 0; i < shardRS.nodes.length; i++) {
+ delete shardRS.nodes[i].fullOptions.shardsvr;
+ shardRS.nodes[i].fullOptions.setParameter = 'skipShardingConfigurationChecks=true';
+}
+shardRS.startSet({}, true);
+expectState(shardRS, ReplSetTest.State.PRIMARY);
+shardRS.stopSet();
+MongoRunner.stopMongos(st.s);
})();
diff --git a/jstests/noPassthrough/skip_write_conflict_retries_failpoint.js b/jstests/noPassthrough/skip_write_conflict_retries_failpoint.js
index 538ba853938..fb56434fd61 100644
--- a/jstests/noPassthrough/skip_write_conflict_retries_failpoint.js
+++ b/jstests/noPassthrough/skip_write_conflict_retries_failpoint.js
@@ -9,59 +9,59 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB("test");
- const testColl = testDB.getCollection("skip_write_conflict_retries_failpoint");
+const primary = rst.getPrimary();
+const testDB = primary.getDB("test");
+const testColl = testDB.getCollection("skip_write_conflict_retries_failpoint");
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(testDB.getName());
- const sessionColl = sessionDB.getCollection(testColl.getName());
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(testDB.getName());
+const sessionColl = sessionDB.getCollection(testColl.getName());
- assert.commandWorked(testColl.runCommand(
- "createIndexes",
- {indexes: [{key: {a: 1}, name: "a_1", unique: true}], writeConcern: {w: "majority"}}));
+assert.commandWorked(testColl.runCommand(
+ "createIndexes",
+ {indexes: [{key: {a: 1}, name: "a_1", unique: true}], writeConcern: {w: "majority"}}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "alwaysOn"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "alwaysOn"}));
- // A non-transactional insert would ordinarily keep retrying if it conflicts with a write
- // operation performed inside a multi-statement transaction. However, with the
- // "skipWriteConflictRetries" failpoint enabled, the non-transactional insert should immediately
- // fail with a WriteConflict error response.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "from transaction", a: 0}));
+// A non-transactional insert would ordinarily keep retrying if it conflicts with a write
+// operation performed inside a multi-statement transaction. However, with the
+// "skipWriteConflictRetries" failpoint enabled, the non-transactional insert should immediately
+// fail with a WriteConflict error response.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "from transaction", a: 0}));
- assert.commandFailedWithCode(testColl.insert({_id: "from outside transaction", a: 0}),
- ErrorCodes.WriteConflict);
+assert.commandFailedWithCode(testColl.insert({_id: "from outside transaction", a: 0}),
+ ErrorCodes.WriteConflict);
- assert.commandWorked(session.commitTransaction_forTesting());
- assert.eq(testColl.findOne({a: 0}), {_id: "from transaction", a: 0});
+assert.commandWorked(session.commitTransaction_forTesting());
+assert.eq(testColl.findOne({a: 0}), {_id: "from transaction", a: 0});
- // A non-transactional update would ordinarily keep retrying if it conflicts with a write
- // operation performed inside a multi-statement transaction. However, with the
- // "skipWriteConflictRetries" failpoint enabled, the non-transactional insert should immediately
- // fail with a WriteConflict error response.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "from prepared transaction", a: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+// A non-transactional update would ordinarily keep retrying if it conflicts with a write
+// operation performed inside a multi-statement transaction. However, with the
+// "skipWriteConflictRetries" failpoint enabled, the non-transactional insert should immediately
+// fail with a WriteConflict error response.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "from prepared transaction", a: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandFailedWithCode(testColl.update({_id: "from transaction"}, {$set: {a: 1}}),
- ErrorCodes.WriteConflict);
+assert.commandFailedWithCode(testColl.update({_id: "from transaction"}, {$set: {a: 1}}),
+ ErrorCodes.WriteConflict);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.eq(testColl.findOne({a: 1}), {_id: "from prepared transaction", a: 1});
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.eq(testColl.findOne({a: 1}), {_id: "from prepared transaction", a: 1});
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "off"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "off"}));
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/snapshotWindow_serverParameters.js b/jstests/noPassthrough/snapshotWindow_serverParameters.js
index 4b41954ed61..fa0f004c511 100644
--- a/jstests/noPassthrough/snapshotWindow_serverParameters.js
+++ b/jstests/noPassthrough/snapshotWindow_serverParameters.js
@@ -2,84 +2,84 @@
// and via setParameter command.
(function() {
- 'use strict';
+'use strict';
- load("jstests/noPassthrough/libs/server_parameter_helpers.js");
+load("jstests/noPassthrough/libs/server_parameter_helpers.js");
- // Valid parameter values are in the range [0, infinity).
- testNumericServerParameter("maxTargetSnapshotHistoryWindowInSeconds",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 5 /*defaultValue*/,
- 30 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- -1 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [0, infinity).
+testNumericServerParameter("maxTargetSnapshotHistoryWindowInSeconds",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 5 /*defaultValue*/,
+ 30 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ -1 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
- // Valid parameter values are in the range [0, 100].
- testNumericServerParameter("cachePressureThreshold",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 50 /*defaultValue*/,
- 70 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- -1 /*lowerOutOfBounds*/,
- true /*hasUpperBound*/,
- 101 /*upperOutOfBounds*/);
+// Valid parameter values are in the range [0, 100].
+testNumericServerParameter("cachePressureThreshold",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 50 /*defaultValue*/,
+ 70 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ -1 /*lowerOutOfBounds*/,
+ true /*hasUpperBound*/,
+ 101 /*upperOutOfBounds*/);
- // Valid parameter values are in the range (0, 1).
- testNumericServerParameter("snapshotWindowMultiplicativeDecrease",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 0.75 /*defaultValue*/,
- 0.50 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- -1 /*lowerOutOfBounds*/,
- true /*hasUpperBound*/,
- 1.1 /*upperOutOfBounds*/);
+// Valid parameter values are in the range (0, 1).
+testNumericServerParameter("snapshotWindowMultiplicativeDecrease",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 0.75 /*defaultValue*/,
+ 0.50 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ -1 /*lowerOutOfBounds*/,
+ true /*hasUpperBound*/,
+ 1.1 /*upperOutOfBounds*/);
- // Valid parameter values are in the range [1, infinity).
- testNumericServerParameter("snapshotWindowAdditiveIncreaseSeconds",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 2 /*defaultValue*/,
- 10 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [1, infinity).
+testNumericServerParameter("snapshotWindowAdditiveIncreaseSeconds",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 2 /*defaultValue*/,
+ 10 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
- // Valid parameter values are in the range [1, infinity).
- testNumericServerParameter("checkCachePressurePeriodSeconds",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 5 /*defaultValue*/,
- 8 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [1, infinity).
+testNumericServerParameter("checkCachePressurePeriodSeconds",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 5 /*defaultValue*/,
+ 8 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
- // Valid parameter values are in the range [1, infinity).
- testNumericServerParameter("minMillisBetweenSnapshotWindowInc",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 500 /*defaultValue*/,
- 2 * 1000 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [1, infinity).
+testNumericServerParameter("minMillisBetweenSnapshotWindowInc",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 500 /*defaultValue*/,
+ 2 * 1000 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
- // Valid parameter values are in the range [1, infinity).
- testNumericServerParameter("minMillisBetweenSnapshotWindowDec",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 500 /*defaultValue*/,
- 2 * 1000 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [1, infinity).
+testNumericServerParameter("minMillisBetweenSnapshotWindowDec",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 500 /*defaultValue*/,
+ 2 * 1000 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
})();
diff --git a/jstests/noPassthrough/snapshot_cursor_integrity.js b/jstests/noPassthrough/snapshot_cursor_integrity.js
index 6916bee74e7..b69eb97f848 100644
--- a/jstests/noPassthrough/snapshot_cursor_integrity.js
+++ b/jstests/noPassthrough/snapshot_cursor_integrity.js
@@ -2,155 +2,155 @@
// transaction/session. Specifically tests this in the context of snapshot cursors.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- // This test makes assertions on commands run without logical session ids.
- TestData.disableImplicitSessions = true;
-
- const dbName = "test";
- const collName = "coll";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const primaryDB = rst.getPrimary().getDB(dbName);
-
- const session1 = primaryDB.getMongo().startSession();
- const sessionDB1 = session1.getDatabase(dbName);
-
- const session2 = primaryDB.getMongo().startSession();
- const sessionDB2 = session2.getDatabase(dbName);
-
- const bulk = primaryDB.coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 10; ++i) {
- bulk.insert({_id: i});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- // Establish a snapshot cursor in session1.
- let res = assert.commandWorked(sessionDB1.runCommand({
- find: collName,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(0),
- autocommit: false,
- startTransaction: true,
- batchSize: 2
- }));
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- let cursorID = res.cursor.id;
-
- // The cursor may not be iterated outside of any session.
- assert.commandFailedWithCode(
- primaryDB.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50737);
-
- // The cursor can still be iterated in session1.
- assert.commandWorked(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(0),
- batchSize: 2
- }));
-
- // The cursor may not be iterated in a different session.
- assert.commandFailedWithCode(
- sessionDB2.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50738);
-
- // The cursor can still be iterated in session1.
- assert.commandWorked(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(0),
- batchSize: 2
- }));
-
- // The cursor may not be iterated outside of any transaction.
- assert.commandFailedWithCode(
- sessionDB1.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50740);
-
- // The cursor can still be iterated in its transaction in session1.
- assert.commandWorked(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(0),
- batchSize: 2
- }));
-
- // The cursor may not be iterated in a different transaction on session1.
- assert.commandWorked(sessionDB1.runCommand({
- find: collName,
- txnNumber: NumberLong(1),
- autocommit: false,
- readConcern: {level: "snapshot"},
- startTransaction: true
- }));
- assert.commandFailedWithCode(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(1),
- batchSize: 2
- }),
- 50741);
-
- // The cursor can no longer be iterated because its transaction has ended.
- assert.commandFailedWithCode(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(0),
- batchSize: 2
- }),
- ErrorCodes.TransactionTooOld);
-
- // Kill the cursor.
- assert.commandWorked(
- sessionDB1.runCommand({killCursors: sessionDB1.coll.getName(), cursors: [cursorID]}));
-
- // Establish a cursor outside of any transaction in session1.
- res = assert.commandWorked(sessionDB1.runCommand({find: collName, batchSize: 2}));
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- cursorID = res.cursor.id;
-
- // The cursor may not be iterated inside a transaction.
- assert.commandWorked(sessionDB1.runCommand({
- find: collName,
- txnNumber: NumberLong(2),
- autocommit: false,
- readConcern: {level: "snapshot"},
- startTransaction: true
- }));
- assert.commandFailedWithCode(sessionDB1.runCommand({
- getMore: cursorID,
- collection: collName,
- autocommit: false,
- txnNumber: NumberLong(2),
- batchSize: 2
- }),
- 50739);
-
- // The cursor can still be iterated outside of any transaction. Exhaust the cursor.
- assert.commandWorked(sessionDB1.runCommand({getMore: cursorID, collection: collName}));
-
- // Establish a cursor outside of any session.
- res = assert.commandWorked(primaryDB.runCommand({find: collName, batchSize: 2}));
- assert(res.hasOwnProperty("cursor"));
- assert(res.cursor.hasOwnProperty("id"));
- cursorID = res.cursor.id;
-
- // The cursor may not be iterated inside a session.
- assert.commandFailedWithCode(
- sessionDB1.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50736);
-
- // The cursor can still be iterated outside of any session. Exhaust the cursor.
- assert.commandWorked(primaryDB.runCommand({getMore: cursorID, collection: collName}));
-
- session1.endSession();
- session2.endSession();
- rst.stopSet();
+"use strict";
+
+// This test makes assertions on commands run without logical session ids.
+TestData.disableImplicitSessions = true;
+
+const dbName = "test";
+const collName = "coll";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primaryDB = rst.getPrimary().getDB(dbName);
+
+const session1 = primaryDB.getMongo().startSession();
+const sessionDB1 = session1.getDatabase(dbName);
+
+const session2 = primaryDB.getMongo().startSession();
+const sessionDB2 = session2.getDatabase(dbName);
+
+const bulk = primaryDB.coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 10; ++i) {
+ bulk.insert({_id: i});
+}
+assert.commandWorked(bulk.execute({w: "majority"}));
+
+// Establish a snapshot cursor in session1.
+let res = assert.commandWorked(sessionDB1.runCommand({
+ find: collName,
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(0),
+ autocommit: false,
+ startTransaction: true,
+ batchSize: 2
+}));
+assert(res.hasOwnProperty("cursor"));
+assert(res.cursor.hasOwnProperty("id"));
+let cursorID = res.cursor.id;
+
+// The cursor may not be iterated outside of any session.
+assert.commandFailedWithCode(
+ primaryDB.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50737);
+
+// The cursor can still be iterated in session1.
+assert.commandWorked(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(0),
+ batchSize: 2
+}));
+
+// The cursor may not be iterated in a different session.
+assert.commandFailedWithCode(
+ sessionDB2.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50738);
+
+// The cursor can still be iterated in session1.
+assert.commandWorked(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(0),
+ batchSize: 2
+}));
+
+// The cursor may not be iterated outside of any transaction.
+assert.commandFailedWithCode(
+ sessionDB1.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50740);
+
+// The cursor can still be iterated in its transaction in session1.
+assert.commandWorked(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(0),
+ batchSize: 2
+}));
+
+// The cursor may not be iterated in a different transaction on session1.
+assert.commandWorked(sessionDB1.runCommand({
+ find: collName,
+ txnNumber: NumberLong(1),
+ autocommit: false,
+ readConcern: {level: "snapshot"},
+ startTransaction: true
+}));
+assert.commandFailedWithCode(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(1),
+ batchSize: 2
+}),
+ 50741);
+
+// The cursor can no longer be iterated because its transaction has ended.
+assert.commandFailedWithCode(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(0),
+ batchSize: 2
+}),
+ ErrorCodes.TransactionTooOld);
+
+// Kill the cursor.
+assert.commandWorked(
+ sessionDB1.runCommand({killCursors: sessionDB1.coll.getName(), cursors: [cursorID]}));
+
+// Establish a cursor outside of any transaction in session1.
+res = assert.commandWorked(sessionDB1.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"));
+assert(res.cursor.hasOwnProperty("id"));
+cursorID = res.cursor.id;
+
+// The cursor may not be iterated inside a transaction.
+assert.commandWorked(sessionDB1.runCommand({
+ find: collName,
+ txnNumber: NumberLong(2),
+ autocommit: false,
+ readConcern: {level: "snapshot"},
+ startTransaction: true
+}));
+assert.commandFailedWithCode(sessionDB1.runCommand({
+ getMore: cursorID,
+ collection: collName,
+ autocommit: false,
+ txnNumber: NumberLong(2),
+ batchSize: 2
+}),
+ 50739);
+
+// The cursor can still be iterated outside of any transaction. Exhaust the cursor.
+assert.commandWorked(sessionDB1.runCommand({getMore: cursorID, collection: collName}));
+
+// Establish a cursor outside of any session.
+res = assert.commandWorked(primaryDB.runCommand({find: collName, batchSize: 2}));
+assert(res.hasOwnProperty("cursor"));
+assert(res.cursor.hasOwnProperty("id"));
+cursorID = res.cursor.id;
+
+// The cursor may not be iterated inside a session.
+assert.commandFailedWithCode(
+ sessionDB1.runCommand({getMore: cursorID, collection: collName, batchSize: 2}), 50736);
+
+// The cursor can still be iterated outside of any session. Exhaust the cursor.
+assert.commandWorked(primaryDB.runCommand({getMore: cursorID, collection: collName}));
+
+session1.endSession();
+session2.endSession();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/snapshot_cursor_shutdown_stepdown.js b/jstests/noPassthrough/snapshot_cursor_shutdown_stepdown.js
index f73eb952043..a9cc8f40d9b 100644
--- a/jstests/noPassthrough/snapshot_cursor_shutdown_stepdown.js
+++ b/jstests/noPassthrough/snapshot_cursor_shutdown_stepdown.js
@@ -1,99 +1,98 @@
// Tests that stashed transaction resources are destroyed at shutdown and stepdown.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- //
- // Test that stashed transaction resources are destroyed at shutdown.
- //
-
- let rst = new ReplSetTest({nodes: 1});
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+//
+// Test that stashed transaction resources are destroyed at shutdown.
+//
+
+let rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+let primaryDB = rst.getPrimary().getDB(dbName);
+
+let session = primaryDB.getMongo().startSession();
+let sessionDB = session.getDatabase(dbName);
+
+for (let i = 0; i < 4; i++) {
+ assert.commandWorked(sessionDB.coll.insert({_id: i}, {writeConcern: {w: "majority"}}));
+}
+
+// Create a snapshot read cursor.
+assert.commandWorked(sessionDB.runCommand({
+ find: collName,
+ batchSize: 2,
+ readConcern: {level: "snapshot"},
+ startTransaction: true,
+ autocommit: false,
+ txnNumber: NumberLong(0)
+}));
+
+// It should be possible to shut down the server without hanging. We must skip collection
+// validation, since this will hang.
+const signal = true; // Use default kill signal.
+const forRestart = false;
+rst.stopSet(signal, forRestart, {skipValidation: true});
+
+function testStepdown(stepdownFunc) {
+ rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();
- let primaryDB = rst.getPrimary().getDB(dbName);
+ const primary = rst.getPrimary();
+ const primaryDB = primary.getDB(dbName);
- let session = primaryDB.getMongo().startSession();
- let sessionDB = session.getDatabase(dbName);
+ const session = primaryDB.getMongo().startSession();
+ const sessionDB = session.getDatabase(dbName);
for (let i = 0; i < 4; i++) {
assert.commandWorked(sessionDB.coll.insert({_id: i}, {writeConcern: {w: "majority"}}));
}
// Create a snapshot read cursor.
- assert.commandWorked(sessionDB.runCommand({
+ const res = assert.commandWorked(sessionDB.runCommand({
find: collName,
batchSize: 2,
readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(0),
startTransaction: true,
- autocommit: false,
- txnNumber: NumberLong(0)
+ autocommit: false
}));
-
- // It should be possible to shut down the server without hanging. We must skip collection
- // validation, since this will hang.
- const signal = true; // Use default kill signal.
- const forRestart = false;
- rst.stopSet(signal, forRestart, {skipValidation: true});
-
- function testStepdown(stepdownFunc) {
- rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB(dbName);
-
- const session = primaryDB.getMongo().startSession();
- const sessionDB = session.getDatabase(dbName);
-
- for (let i = 0; i < 4; i++) {
- assert.commandWorked(sessionDB.coll.insert({_id: i}, {writeConcern: {w: "majority"}}));
- }
-
- // Create a snapshot read cursor.
- const res = assert.commandWorked(sessionDB.runCommand({
- find: collName,
- batchSize: 2,
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(0),
- startTransaction: true,
- autocommit: false
- }));
- assert(res.hasOwnProperty("cursor"), tojson(res));
- assert(res.cursor.hasOwnProperty("id"), tojson(res));
- const cursorId = res.cursor.id;
-
- // It should be possible to step down the primary without hanging.
- stepdownFunc(rst);
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // Kill the cursor.
- assert.commandWorked(sessionDB.runCommand({killCursors: collName, cursors: [cursorId]}));
- rst.stopSet();
- }
-
- //
- // Test that stashed transaction resources are destroyed at stepdown triggered by
- // replSetStepDown.
- //
- function replSetStepDown(replSetTest) {
- assert.commandWorked(
- replSetTest.getPrimary().adminCommand({replSetStepDown: 60, force: true}));
- }
- testStepdown(replSetStepDown);
-
- //
- // Test that stashed transaction resources are destroyed at stepdown triggered by loss of
- // quorum.
- //
- function stepDownOnLossOfQuorum(replSetTest) {
- const secondary = rst.getSecondary();
- const secondaryId = rst.getNodeId(secondary);
- rst.stop(secondaryId);
- }
- testStepdown(stepDownOnLossOfQuorum);
+ assert(res.hasOwnProperty("cursor"), tojson(res));
+ assert(res.cursor.hasOwnProperty("id"), tojson(res));
+ const cursorId = res.cursor.id;
+
+ // It should be possible to step down the primary without hanging.
+ stepdownFunc(rst);
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+ // Kill the cursor.
+ assert.commandWorked(sessionDB.runCommand({killCursors: collName, cursors: [cursorId]}));
+ rst.stopSet();
+}
+
+//
+// Test that stashed transaction resources are destroyed at stepdown triggered by
+// replSetStepDown.
+//
+function replSetStepDown(replSetTest) {
+ assert.commandWorked(replSetTest.getPrimary().adminCommand({replSetStepDown: 60, force: true}));
+}
+testStepdown(replSetStepDown);
+
+//
+// Test that stashed transaction resources are destroyed at stepdown triggered by loss of
+// quorum.
+//
+function stepDownOnLossOfQuorum(replSetTest) {
+ const secondary = rst.getSecondary();
+ const secondaryId = rst.getNodeId(secondary);
+ rst.stop(secondaryId);
+}
+testStepdown(stepDownOnLossOfQuorum);
})();
diff --git a/jstests/noPassthrough/snapshot_reads.js b/jstests/noPassthrough/snapshot_reads.js
index 75b2bc9c41b..9c82a24af7e 100644
--- a/jstests/noPassthrough/snapshot_reads.js
+++ b/jstests/noPassthrough/snapshot_reads.js
@@ -1,131 +1,129 @@
// Tests snapshot isolation on readConcern level snapshot read.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "coll";
-
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- let conf = rst.getReplSetConfig();
- conf.members[1].votes = 0;
- conf.members[1].priority = 0;
- rst.initiate(conf);
-
- const primaryDB = rst.getPrimary().getDB(dbName);
-
- function parseCursor(cmdResult) {
- if (cmdResult.hasOwnProperty("cursor")) {
- assert(cmdResult.cursor.hasOwnProperty("id"));
- return cmdResult.cursor;
- } else if (cmdResult.hasOwnProperty("cursors") && cmdResult.cursors.length === 1 &&
- cmdResult.cursors[0].hasOwnProperty("cursor")) {
- assert(cmdResult.cursors[0].cursor.hasOwnProperty("id"));
- return cmdResult.cursors[0].cursor;
- }
-
- throw Error("parseCursor failed to find cursor object. Command Result: " +
- tojson(cmdResult));
+"use strict";
+
+const dbName = "test";
+const collName = "coll";
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+let conf = rst.getReplSetConfig();
+conf.members[1].votes = 0;
+conf.members[1].priority = 0;
+rst.initiate(conf);
+
+const primaryDB = rst.getPrimary().getDB(dbName);
+
+function parseCursor(cmdResult) {
+ if (cmdResult.hasOwnProperty("cursor")) {
+ assert(cmdResult.cursor.hasOwnProperty("id"));
+ return cmdResult.cursor;
+ } else if (cmdResult.hasOwnProperty("cursors") && cmdResult.cursors.length === 1 &&
+ cmdResult.cursors[0].hasOwnProperty("cursor")) {
+ assert(cmdResult.cursors[0].cursor.hasOwnProperty("id"));
+ return cmdResult.cursors[0].cursor;
}
- function runTest({useCausalConsistency, establishCursorCmd, readConcern}) {
- let cmdName = Object.getOwnPropertyNames(establishCursorCmd)[0];
+ throw Error("parseCursor failed to find cursor object. Command Result: " + tojson(cmdResult));
+}
+
+function runTest({useCausalConsistency, establishCursorCmd, readConcern}) {
+ let cmdName = Object.getOwnPropertyNames(establishCursorCmd)[0];
- jsTestLog(`Test establishCursorCmd: ${cmdName},
+ jsTestLog(`Test establishCursorCmd: ${cmdName},
useCausalConsistency: ${useCausalConsistency},
readConcern: ${tojson(readConcern)}`);
- primaryDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+ primaryDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- const session =
- primaryDB.getMongo().startSession({causalConsistency: useCausalConsistency});
- const sessionDb = session.getDatabase(dbName);
+ const session = primaryDB.getMongo().startSession({causalConsistency: useCausalConsistency});
+ const sessionDb = session.getDatabase(dbName);
- const bulk = primaryDB.coll.initializeUnorderedBulkOp();
- for (let x = 0; x < 10; ++x) {
- bulk.insert({_id: x});
- }
- assert.commandWorked(bulk.execute({w: "majority"}));
-
- session.startTransaction({readConcern: readConcern});
-
- // Establish a snapshot batchSize:0 cursor.
- let res = assert.commandWorked(sessionDb.runCommand(establishCursorCmd));
- let cursor = parseCursor(res);
-
- assert(cursor.hasOwnProperty("firstBatch"), tojson(res));
- assert.eq(0, cursor.firstBatch.length, tojson(res));
- assert.neq(cursor.id, 0);
-
- // Insert an 11th document which should not be visible to the snapshot cursor. This write is
- // performed outside of the session.
- assert.writeOK(primaryDB.coll.insert({_id: 10}, {writeConcern: {w: "majority"}}));
-
- // Fetch the first 5 documents.
- res = assert.commandWorked(
- sessionDb.runCommand({getMore: cursor.id, collection: collName, batchSize: 5}));
- cursor = parseCursor(res);
- assert.neq(0, cursor.id, tojson(res));
- assert(cursor.hasOwnProperty("nextBatch"), tojson(res));
- assert.eq(5, cursor.nextBatch.length, tojson(res));
-
- // Exhaust the cursor, retrieving the remainder of the result set. Performing a second
- // getMore tests snapshot isolation across multiple getMore invocations.
- res = assert.commandWorked(
- sessionDb.runCommand({getMore: cursor.id, collection: collName, batchSize: 20}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // The cursor has been exhausted.
- cursor = parseCursor(res);
- assert.eq(0, cursor.id, tojson(res));
-
- // Only the remaining 5 of the initial 10 documents are returned. The 11th document is not
- // part of the result set.
- assert(cursor.hasOwnProperty("nextBatch"), tojson(res));
- assert.eq(5, cursor.nextBatch.length, tojson(res));
-
- // Perform a second snapshot read under a new transaction.
- session.startTransaction({readConcern: readConcern});
- res = assert.commandWorked(
- sessionDb.runCommand({find: collName, sort: {_id: 1}, batchSize: 20}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // The cursor has been exhausted.
- cursor = parseCursor(res);
- assert.eq(0, cursor.id, tojson(res));
-
- // All 11 documents are returned.
- assert(cursor.hasOwnProperty("firstBatch"), tojson(res));
- assert.eq(11, cursor.firstBatch.length, tojson(res));
-
- session.endSession();
+ const bulk = primaryDB.coll.initializeUnorderedBulkOp();
+ for (let x = 0; x < 10; ++x) {
+ bulk.insert({_id: x});
}
-
- // Test transaction reads using find or aggregate. Inserts outside
- // transaction aren't visible, even after they are majority-committed.
- // (This is a requirement for readConcern snapshot, but it is merely an
- // implementation detail for majority or for the default, local. At some
- // point, it would be desirable to have a transaction with readConcern
- // local or majority see writes from other sessions. However, our current
- // implementation of ensuring any data we read does not get rolled back
- // relies on the fact that we read from a single WT snapshot, since we
- // choose the timestamp to wait on in the first command of the
- // transaction.)
- let findCmd = {find: collName, sort: {_id: 1}, batchSize: 0};
- let aggCmd = {aggregate: collName, pipeline: [{$sort: {_id: 1}}], cursor: {batchSize: 0}};
-
- for (let establishCursorCmd of[findCmd, aggCmd]) {
- for (let useCausalConsistency of[false, true]) {
- for (let readConcern of[{level: "snapshot"}, {level: "majority"}, null]) {
- runTest({
- establishCursorCmd: establishCursorCmd,
- useCausalConsistency: useCausalConsistency,
- readConcern: readConcern
- });
- }
+ assert.commandWorked(bulk.execute({w: "majority"}));
+
+ session.startTransaction({readConcern: readConcern});
+
+ // Establish a snapshot batchSize:0 cursor.
+ let res = assert.commandWorked(sessionDb.runCommand(establishCursorCmd));
+ let cursor = parseCursor(res);
+
+ assert(cursor.hasOwnProperty("firstBatch"), tojson(res));
+ assert.eq(0, cursor.firstBatch.length, tojson(res));
+ assert.neq(cursor.id, 0);
+
+ // Insert an 11th document which should not be visible to the snapshot cursor. This write is
+ // performed outside of the session.
+ assert.writeOK(primaryDB.coll.insert({_id: 10}, {writeConcern: {w: "majority"}}));
+
+ // Fetch the first 5 documents.
+ res = assert.commandWorked(
+ sessionDb.runCommand({getMore: cursor.id, collection: collName, batchSize: 5}));
+ cursor = parseCursor(res);
+ assert.neq(0, cursor.id, tojson(res));
+ assert(cursor.hasOwnProperty("nextBatch"), tojson(res));
+ assert.eq(5, cursor.nextBatch.length, tojson(res));
+
+ // Exhaust the cursor, retrieving the remainder of the result set. Performing a second
+ // getMore tests snapshot isolation across multiple getMore invocations.
+ res = assert.commandWorked(
+ sessionDb.runCommand({getMore: cursor.id, collection: collName, batchSize: 20}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // The cursor has been exhausted.
+ cursor = parseCursor(res);
+ assert.eq(0, cursor.id, tojson(res));
+
+ // Only the remaining 5 of the initial 10 documents are returned. The 11th document is not
+ // part of the result set.
+ assert(cursor.hasOwnProperty("nextBatch"), tojson(res));
+ assert.eq(5, cursor.nextBatch.length, tojson(res));
+
+ // Perform a second snapshot read under a new transaction.
+ session.startTransaction({readConcern: readConcern});
+ res =
+ assert.commandWorked(sessionDb.runCommand({find: collName, sort: {_id: 1}, batchSize: 20}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // The cursor has been exhausted.
+ cursor = parseCursor(res);
+ assert.eq(0, cursor.id, tojson(res));
+
+ // All 11 documents are returned.
+ assert(cursor.hasOwnProperty("firstBatch"), tojson(res));
+ assert.eq(11, cursor.firstBatch.length, tojson(res));
+
+ session.endSession();
+}
+
+// Test transaction reads using find or aggregate. Inserts outside
+// transaction aren't visible, even after they are majority-committed.
+// (This is a requirement for readConcern snapshot, but it is merely an
+// implementation detail for majority or for the default, local. At some
+// point, it would be desirable to have a transaction with readConcern
+// local or majority see writes from other sessions. However, our current
+// implementation of ensuring any data we read does not get rolled back
+// relies on the fact that we read from a single WT snapshot, since we
+// choose the timestamp to wait on in the first command of the
+// transaction.)
+let findCmd = {find: collName, sort: {_id: 1}, batchSize: 0};
+let aggCmd = {aggregate: collName, pipeline: [{$sort: {_id: 1}}], cursor: {batchSize: 0}};
+
+for (let establishCursorCmd of [findCmd, aggCmd]) {
+ for (let useCausalConsistency of [false, true]) {
+ for (let readConcern of [{level: "snapshot"}, {level: "majority"}, null]) {
+ runTest({
+ establishCursorCmd: establishCursorCmd,
+ useCausalConsistency: useCausalConsistency,
+ readConcern: readConcern
+ });
}
}
+}
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/socket_disconnect_kills.js b/jstests/noPassthrough/socket_disconnect_kills.js
index 5214bf58ad4..3d6eb513b24 100644
--- a/jstests/noPassthrough/socket_disconnect_kills.js
+++ b/jstests/noPassthrough/socket_disconnect_kills.js
@@ -11,217 +11,213 @@
// @tags: [requires_sharding]
(function() {
- "use strict";
-
- const testName = "socket_disconnect_kills";
-
- // Used to generate unique appnames
- let id = 0;
-
- // client - A client connection for curop (and that holds the hostname)
- // pre - A callback to run with the timing out socket
- // post - A callback to run after everything else has resolved (cleanup)
- //
- // Returns false if the op was gone from current op
- function check(client, pre, post) {
- const interval = 200;
- const timeout = 10000;
- const socketTimeout = 5000;
-
- const host = client.host;
-
- // Make a socket which will timeout
- id++;
- let conn = new Mongo(
- `mongodb://${host}/?socketTimeoutMS=${socketTimeout}&appName=${testName}${id}`);
-
- // Make sure it works at all
- assert.commandWorked(conn.adminCommand({ping: 1}));
-
- try {
- // Make sure that whatever operation we ran had a network error
- assert.throws(function() {
- try {
- pre(conn);
- } catch (e) {
- throw e;
- }
- }, [], "error doing query: failed: network error while attempting");
-
- // Spin until the op leaves currentop, or timeout passes
- const start = new Date();
-
- while (1) {
- if (!client.getDB("admin")
- .aggregate([
- {$currentOp: {localOps: true}},
- {$match: {appName: testName + id}},
- ])
- .itcount()) {
- return false;
- }
-
- if (((new Date()).getTime() - start.getTime()) > timeout) {
- return true;
- }
-
- sleep(interval);
- }
- } finally {
- post();
- }
- }
-
- function runWithCuropFailPointEnabled(client, failPointName) {
- return function(entry) {
- entry[0](client,
- function(client) {
- assert.commandWorked(client.adminCommand({
- configureFailPoint: failPointName,
- mode: "alwaysOn",
- data: {shouldCheckForInterrupt: true},
- }));
-
- entry[1](client);
- },
- function() {
- assert.commandWorked(
- client.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- });
- };
- }
-
- function runWithCmdFailPointEnabled(client) {
- return function(entry) {
- const failPointName = "waitInCommandMarkKillOnClientDisconnect";
-
- entry[0](client,
- function(client) {
- assert.commandWorked(client.adminCommand({
- configureFailPoint: failPointName,
- mode: "alwaysOn",
- data: {appName: testName + id},
- }));
-
- entry[1](client);
- },
- function() {
- assert.commandWorked(
- client.adminCommand({configureFailPoint: failPointName, mode: "off"}));
- });
- };
- }
+"use strict";
- function checkClosedEarly(client, pre, post) {
- assert(!check(client, pre, post), "operation killed on socket disconnect");
- }
-
- function checkNotClosedEarly(client, pre, post) {
- assert(check(client, pre, post), "operation not killed on socket disconnect");
- }
+const testName = "socket_disconnect_kills";
- function runCommand(cmd) {
- return function(client) {
- assert.commandWorked(client.getDB(testName).runCommand(cmd));
- };
- }
+// Used to generate unique appnames
+let id = 0;
- function runTests(client) {
- let admin = client.getDB("admin");
+// client - A client connection for curop (and that holds the hostname)
+// pre - A callback to run with the timing out socket
+// post - A callback to run after everything else has resolved (cleanup)
+//
+// Returns false if the op was gone from current op
+function check(client, pre, post) {
+ const interval = 200;
+ const timeout = 10000;
+ const socketTimeout = 5000;
+
+ const host = client.host;
+
+ // Make a socket which will timeout
+ id++;
+ let conn =
+ new Mongo(`mongodb://${host}/?socketTimeoutMS=${socketTimeout}&appName=${testName}${id}`);
+
+ // Make sure it works at all
+ assert.commandWorked(conn.adminCommand({ping: 1}));
+
+ try {
+ // Make sure that whatever operation we ran had a network error
+ assert.throws(function() {
+ try {
+ pre(conn);
+ } catch (e) {
+ throw e;
+ }
+ }, [], "error doing query: failed: network error while attempting");
+
+ // Spin until the op leaves currentop, or timeout passes
+ const start = new Date();
+
+ while (1) {
+ if (!client.getDB("admin")
+ .aggregate([
+ {$currentOp: {localOps: true}},
+ {$match: {appName: testName + id}},
+ ])
+ .itcount()) {
+ return false;
+ }
- assert.writeOK(client.getDB(testName).test.insert({x: 1}));
- assert.writeOK(client.getDB(testName).test.insert({x: 2}));
- assert.writeOK(client.getDB(testName).test.insert({x: 3}));
+ if (((new Date()).getTime() - start.getTime()) > timeout) {
+ return true;
+ }
- [[checkClosedEarly, runCommand({find: "test", filter: {}})],
- [
- checkClosedEarly,
- runCommand({
- find: "test",
- filter: {
- $where: function() {
- sleep(100000);
- }
- }
- })
- ],
- [
- checkClosedEarly,
- runCommand({
- find: "test",
- filter: {
- $where: function() {
- while (true) {
- }
- }
- }
- })
- ],
- [
- checkClosedEarly,
- function(client) {
- client.forceReadMode("legacy");
- assert(client.getDB(testName).test.findOne({}));
- }
- ],
- ].forEach(runWithCuropFailPointEnabled(client, "waitInFindBeforeMakingBatch"));
-
- // After SERVER-39475, re-enable these tests and add negative testing for $out cursors.
- const serverSupportsEarlyDisconnectOnGetMore = false;
- if (serverSupportsEarlyDisconnectOnGetMore) {
- [[
- checkClosedEarly,
- function(client) {
- let result = assert.commandWorked(
- client.getDB(testName).runCommand({find: "test", filter: {}, batchSize: 0}));
- assert.commandWorked(client.getDB(testName).runCommand(
- {getMore: result.cursor.id, collection: "test"}));
- }
- ],
- [
- checkClosedEarly,
- function(client) {
- client.forceReadMode("legacy");
- var cursor = client.getDB(testName).test.find({}).batchSize(2);
- assert(cursor.next());
- assert(cursor.next());
- assert(cursor.next());
- }
- ],
- ].forEach(runWithCuropFailPointEnabled(client,
- "waitAfterPinningCursorBeforeGetMoreBatch"));
+ sleep(interval);
}
-
- [[checkClosedEarly, runCommand({aggregate: "test", pipeline: [], cursor: {}})],
+ } finally {
+ post();
+ }
+}
+
+function runWithCuropFailPointEnabled(client, failPointName) {
+ return function(entry) {
+ entry[0](client,
+ function(client) {
+ assert.commandWorked(client.adminCommand({
+ configureFailPoint: failPointName,
+ mode: "alwaysOn",
+ data: {shouldCheckForInterrupt: true},
+ }));
+
+ entry[1](client);
+ },
+ function() {
+ assert.commandWorked(
+ client.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+ });
+ };
+}
+
+function runWithCmdFailPointEnabled(client) {
+ return function(entry) {
+ const failPointName = "waitInCommandMarkKillOnClientDisconnect";
+
+ entry[0](client,
+ function(client) {
+ assert.commandWorked(client.adminCommand({
+ configureFailPoint: failPointName,
+ mode: "alwaysOn",
+ data: {appName: testName + id},
+ }));
+
+ entry[1](client);
+ },
+ function() {
+ assert.commandWorked(
+ client.adminCommand({configureFailPoint: failPointName, mode: "off"}));
+ });
+ };
+}
+
+function checkClosedEarly(client, pre, post) {
+ assert(!check(client, pre, post), "operation killed on socket disconnect");
+}
+
+function checkNotClosedEarly(client, pre, post) {
+ assert(check(client, pre, post), "operation not killed on socket disconnect");
+}
+
+function runCommand(cmd) {
+ return function(client) {
+ assert.commandWorked(client.getDB(testName).runCommand(cmd));
+ };
+}
+
+function runTests(client) {
+ let admin = client.getDB("admin");
+
+ assert.writeOK(client.getDB(testName).test.insert({x: 1}));
+ assert.writeOK(client.getDB(testName).test.insert({x: 2}));
+ assert.writeOK(client.getDB(testName).test.insert({x: 3}));
+
+ [[checkClosedEarly, runCommand({find: "test", filter: {}})],
+ [
+ checkClosedEarly,
+ runCommand({
+ find: "test",
+ filter: {
+ $where: function() {
+ sleep(100000);
+ }
+ }
+ })
+ ],
+ [
+ checkClosedEarly,
+ runCommand({
+ find: "test",
+ filter: {
+ $where: function() {
+ while (true) {
+ }
+ }
+ }
+ })
+ ],
+ [
+ checkClosedEarly,
+ function(client) {
+ client.forceReadMode("legacy");
+ assert(client.getDB(testName).test.findOne({}));
+ }
+ ],
+ ].forEach(runWithCuropFailPointEnabled(client, "waitInFindBeforeMakingBatch"));
+
+ // After SERVER-39475, re-enable these tests and add negative testing for $out cursors.
+ const serverSupportsEarlyDisconnectOnGetMore = false;
+ if (serverSupportsEarlyDisconnectOnGetMore) {
+ [[
+ checkClosedEarly,
+ function(client) {
+ let result = assert.commandWorked(
+ client.getDB(testName).runCommand({find: "test", filter: {}, batchSize: 0}));
+ assert.commandWorked(client.getDB(testName).runCommand(
+ {getMore: result.cursor.id, collection: "test"}));
+ }
+ ],
[
- checkNotClosedEarly,
- runCommand({aggregate: "test", pipeline: [{$out: "out"}], cursor: {}})
+ checkClosedEarly,
+ function(client) {
+ client.forceReadMode("legacy");
+ var cursor = client.getDB(testName).test.find({}).batchSize(2);
+ assert(cursor.next());
+ assert(cursor.next());
+ assert(cursor.next());
+ }
],
- ].forEach(runWithCmdFailPointEnabled(client));
-
- [[checkClosedEarly, runCommand({count: "test"})],
- [checkClosedEarly, runCommand({distinct: "test", key: "x"})],
- [checkClosedEarly, runCommand({authenticate: "test", user: "x", pass: "y"})],
- [checkClosedEarly, runCommand({getnonce: 1})],
- [checkClosedEarly, runCommand({saslStart: 1})],
- [checkClosedEarly, runCommand({saslContinue: 1})],
- [checkClosedEarly, runCommand({ismaster: 1})],
- [checkClosedEarly, runCommand({listCollections: 1})],
- [checkClosedEarly, runCommand({listDatabases: 1})],
- [checkClosedEarly, runCommand({listIndexes: "test"})],
- ].forEach(runWithCmdFailPointEnabled(client));
+ ].forEach(runWithCuropFailPointEnabled(client, "waitAfterPinningCursorBeforeGetMoreBatch"));
}
- {
- let proc = MongoRunner.runMongod();
- assert.neq(proc, null);
- runTests(proc);
- MongoRunner.stopMongod(proc);
- }
-
- {
- let st = ShardingTest({mongo: 1, config: 1, shards: 1});
- runTests(st.s0);
- st.stop();
- }
+ [[checkClosedEarly, runCommand({aggregate: "test", pipeline: [], cursor: {}})],
+ [checkNotClosedEarly, runCommand({aggregate: "test", pipeline: [{$out: "out"}], cursor: {}})],
+ ].forEach(runWithCmdFailPointEnabled(client));
+
+ [[checkClosedEarly, runCommand({count: "test"})],
+ [checkClosedEarly, runCommand({distinct: "test", key: "x"})],
+ [checkClosedEarly, runCommand({authenticate: "test", user: "x", pass: "y"})],
+ [checkClosedEarly, runCommand({getnonce: 1})],
+ [checkClosedEarly, runCommand({saslStart: 1})],
+ [checkClosedEarly, runCommand({saslContinue: 1})],
+ [checkClosedEarly, runCommand({ismaster: 1})],
+ [checkClosedEarly, runCommand({listCollections: 1})],
+ [checkClosedEarly, runCommand({listDatabases: 1})],
+ [checkClosedEarly, runCommand({listIndexes: "test"})],
+ ].forEach(runWithCmdFailPointEnabled(client));
+}
+
+{
+ let proc = MongoRunner.runMongod();
+ assert.neq(proc, null);
+ runTests(proc);
+ MongoRunner.stopMongod(proc);
+}
+
+{
+ let st = ShardingTest({mongo: 1, config: 1, shards: 1});
+ runTests(st.s0);
+ st.stop();
+}
})();
diff --git a/jstests/noPassthrough/standalone_replication_recovery.js b/jstests/noPassthrough/standalone_replication_recovery.js
index 1def927772c..6ee47fc9c20 100644
--- a/jstests/noPassthrough/standalone_replication_recovery.js
+++ b/jstests/noPassthrough/standalone_replication_recovery.js
@@ -7,155 +7,150 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
- load("jstests/libs/write_concern_util.js");
-
- const name = 'standalone_replication_recovery';
- const dbName = name;
- const collName = 'srr_coll';
- const logLevel = tojson({storage: {recovery: 2}});
-
- const rst = new ReplSetTest({
- nodes: 2,
- });
-
- function getColl(conn) {
- return conn.getDB(dbName)[collName];
+"use strict";
+load("jstests/replsets/rslib.js");
+load("jstests/libs/write_concern_util.js");
+
+const name = 'standalone_replication_recovery';
+const dbName = name;
+const collName = 'srr_coll';
+const logLevel = tojson({storage: {recovery: 2}});
+
+const rst = new ReplSetTest({
+ nodes: 2,
+});
+
+function getColl(conn) {
+ return conn.getDB(dbName)[collName];
+}
+
+function assertDocsInColl(node, nums) {
+ let results = getColl(node).find().sort({_id: 1}).toArray();
+ let expected = nums.map((i) => ({_id: i}));
+ if (!friendlyEqual(results, expected)) {
+ rst.dumpOplog(node, {}, 100);
}
-
- function assertDocsInColl(node, nums) {
- let results = getColl(node).find().sort({_id: 1}).toArray();
- let expected = nums.map((i) => ({_id: i}));
- if (!friendlyEqual(results, expected)) {
- rst.dumpOplog(node, {}, 100);
- }
- assert.eq(results, expected, "actual (left) != expected (right)");
- }
-
- jsTestLog("Test that an empty standalone fails trying to recover.");
- assert.throws(
- () => rst.start(0, {noReplSet: true, setParameter: {recoverFromOplogAsStandalone: true}}));
-
- jsTestLog("Initiating as a replica set.");
- // Restart as a replica set node without the flag so we can add operations to the oplog.
- let nodes = rst.startSet({setParameter: {logComponentVerbosity: logLevel}});
- let node = nodes[0];
- let secondary = nodes[1];
- rst.initiate({
- _id: name,
- members: [{_id: 0, host: node.host}, {_id: 2, host: secondary.host, priority: 0}]
- });
-
- // Create the collection with w:majority and then perform a clean restart to ensure that
- // the collection is in a stable checkpoint.
- assert.commandWorked(node.getDB(dbName).runCommand({
- create: collName,
- writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}
- }));
- assertDocsInColl(node, []);
- node = rst.restart(node, {"noReplSet": false});
- reconnect(node);
- assert.eq(rst.getPrimary(), node);
-
- // Keep node 0 the primary, but prevent it from committing any writes.
- stopServerReplication(secondary);
-
- assert.commandWorked(getColl(node).insert({_id: 3}, {writeConcern: {w: 1, j: 1}}));
- assert.commandWorked(getColl(node).insert({_id: 4}, {writeConcern: {w: 1, j: 1}}));
- assert.commandWorked(getColl(node).insert({_id: 5}, {writeConcern: {w: 1, j: 1}}));
- assertDocsInColl(node, [3, 4, 5]);
-
- jsTestLog("Test that if we kill the node, recovery still plays.");
- rst.stop(node, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- node = rst.restart(node, {"noReplSet": false});
- reconnect(node);
- assert.eq(rst.getPrimary(), node);
- assertDocsInColl(node, [3, 4, 5]);
-
- jsTestLog("Test that a replica set node cannot start up with the parameter set.");
- assert.throws(() => rst.restart(0, {
- setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
- }));
-
- jsTestLog("Test that on restart as a standalone we only see committed writes by default.");
- node =
- rst.start(node, {noReplSet: true, setParameter: {logComponentVerbosity: logLevel}}, true);
- reconnect(node);
- assertDocsInColl(node, []);
-
- jsTestLog("Test that on restart with the flag set we play recovery.");
- node = rst.restart(node, {
- noReplSet: true,
- setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assertDocsInColl(node, [3, 4, 5]);
-
- jsTestLog("Test that we go into read-only mode.");
- assert.commandFailedWithCode(getColl(node).insert({_id: 1}), ErrorCodes.IllegalOperation);
-
- jsTestLog("Test that we cannot set the parameter during standalone runtime.");
- assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: true}));
- assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: false}));
-
- jsTestLog("Test that on restart after standalone recovery we do not see replicated writes.");
- node = rst.restart(node, {
- noReplSet: true,
- setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assertDocsInColl(node, []);
- assert.commandWorked(getColl(node).insert({_id: 6}));
- assertDocsInColl(node, [6]);
- node = rst.restart(node, {
- noReplSet: true,
- setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assertDocsInColl(node, [3, 4, 5, 6]);
-
- jsTestLog("Test that we can restart again as a replica set node.");
- node = rst.restart(node, {
- noReplSet: false,
- setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assert.eq(rst.getPrimary(), node);
- assertDocsInColl(node, [3, 4, 5, 6]);
-
- jsTestLog("Test that we cannot set the parameter during replica set runtime.");
- assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: true}));
- assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: false}));
-
- jsTestLog("Test that we can still recover as a standalone.");
- assert.commandWorked(getColl(node).insert({_id: 7}));
- assertDocsInColl(node, [3, 4, 5, 6, 7]);
- node = rst.restart(node, {
- noReplSet: true,
- setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assertDocsInColl(node, [6]);
- node = rst.restart(node, {
- noReplSet: true,
- setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assertDocsInColl(node, [3, 4, 5, 6, 7]);
-
- jsTestLog("Restart as a replica set node so that the test can complete successfully.");
- node = rst.restart(node, {
- noReplSet: false,
- setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
- });
- reconnect(node);
- assert.eq(rst.getPrimary(), node);
- assertDocsInColl(node, [3, 4, 5, 6, 7]);
-
- restartServerReplication(secondary);
-
- // Skip checking db hashes since we do a write as a standalone.
- TestData.skipCheckDBHashes = true;
- rst.stopSet();
+ assert.eq(results, expected, "actual (left) != expected (right)");
+}
+
+jsTestLog("Test that an empty standalone fails trying to recover.");
+assert.throws(
+ () => rst.start(0, {noReplSet: true, setParameter: {recoverFromOplogAsStandalone: true}}));
+
+jsTestLog("Initiating as a replica set.");
+// Restart as a replica set node without the flag so we can add operations to the oplog.
+let nodes = rst.startSet({setParameter: {logComponentVerbosity: logLevel}});
+let node = nodes[0];
+let secondary = nodes[1];
+rst.initiate(
+ {_id: name, members: [{_id: 0, host: node.host}, {_id: 2, host: secondary.host, priority: 0}]});
+
+// Create the collection with w:majority and then perform a clean restart to ensure that
+// the collection is in a stable checkpoint.
+assert.commandWorked(node.getDB(dbName).runCommand(
+ {create: collName, writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+assertDocsInColl(node, []);
+node = rst.restart(node, {"noReplSet": false});
+reconnect(node);
+assert.eq(rst.getPrimary(), node);
+
+// Keep node 0 the primary, but prevent it from committing any writes.
+stopServerReplication(secondary);
+
+assert.commandWorked(getColl(node).insert({_id: 3}, {writeConcern: {w: 1, j: 1}}));
+assert.commandWorked(getColl(node).insert({_id: 4}, {writeConcern: {w: 1, j: 1}}));
+assert.commandWorked(getColl(node).insert({_id: 5}, {writeConcern: {w: 1, j: 1}}));
+assertDocsInColl(node, [3, 4, 5]);
+
+jsTestLog("Test that if we kill the node, recovery still plays.");
+rst.stop(node, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+node = rst.restart(node, {"noReplSet": false});
+reconnect(node);
+assert.eq(rst.getPrimary(), node);
+assertDocsInColl(node, [3, 4, 5]);
+
+jsTestLog("Test that a replica set node cannot start up with the parameter set.");
+assert.throws(
+ () => rst.restart(
+ 0, {setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}}));
+
+jsTestLog("Test that on restart as a standalone we only see committed writes by default.");
+node = rst.start(node, {noReplSet: true, setParameter: {logComponentVerbosity: logLevel}}, true);
+reconnect(node);
+assertDocsInColl(node, []);
+
+jsTestLog("Test that on restart with the flag set we play recovery.");
+node = rst.restart(node, {
+ noReplSet: true,
+ setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assertDocsInColl(node, [3, 4, 5]);
+
+jsTestLog("Test that we go into read-only mode.");
+assert.commandFailedWithCode(getColl(node).insert({_id: 1}), ErrorCodes.IllegalOperation);
+
+jsTestLog("Test that we cannot set the parameter during standalone runtime.");
+assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: true}));
+assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: false}));
+
+jsTestLog("Test that on restart after standalone recovery we do not see replicated writes.");
+node = rst.restart(node, {
+ noReplSet: true,
+ setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assertDocsInColl(node, []);
+assert.commandWorked(getColl(node).insert({_id: 6}));
+assertDocsInColl(node, [6]);
+node = rst.restart(node, {
+ noReplSet: true,
+ setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assertDocsInColl(node, [3, 4, 5, 6]);
+
+jsTestLog("Test that we can restart again as a replica set node.");
+node = rst.restart(node, {
+ noReplSet: false,
+ setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assert.eq(rst.getPrimary(), node);
+assertDocsInColl(node, [3, 4, 5, 6]);
+
+jsTestLog("Test that we cannot set the parameter during replica set runtime.");
+assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: true}));
+assert.commandFailed(node.adminCommand({setParameter: 1, recoverFromOplogAsStandalone: false}));
+
+jsTestLog("Test that we can still recover as a standalone.");
+assert.commandWorked(getColl(node).insert({_id: 7}));
+assertDocsInColl(node, [3, 4, 5, 6, 7]);
+node = rst.restart(node, {
+ noReplSet: true,
+ setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assertDocsInColl(node, [6]);
+node = rst.restart(node, {
+ noReplSet: true,
+ setParameter: {recoverFromOplogAsStandalone: true, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assertDocsInColl(node, [3, 4, 5, 6, 7]);
+
+jsTestLog("Restart as a replica set node so that the test can complete successfully.");
+node = rst.restart(node, {
+ noReplSet: false,
+ setParameter: {recoverFromOplogAsStandalone: false, logComponentVerbosity: logLevel}
+});
+reconnect(node);
+assert.eq(rst.getPrimary(), node);
+assertDocsInColl(node, [3, 4, 5, 6, 7]);
+
+restartServerReplication(secondary);
+
+// Skip checking db hashes since we do a write as a standalone.
+TestData.skipCheckDBHashes = true;
+rst.stopSet();
})(); \ No newline at end of file
diff --git a/jstests/noPassthrough/start_session_command.js b/jstests/noPassthrough/start_session_command.js
index bb542e255fc..5c2bbd4b38b 100644
--- a/jstests/noPassthrough/start_session_command.js
+++ b/jstests/noPassthrough/start_session_command.js
@@ -1,103 +1,101 @@
(function() {
- 'use strict';
+'use strict';
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- var conn;
- var admin;
- var foo;
- var result;
- const request = {startSession: 1};
+var conn;
+var admin;
+var foo;
+var result;
+const request = {
+ startSession: 1
+};
- conn = MongoRunner.runMongod({setParameter: {maxSessions: 2}});
- admin = conn.getDB("admin");
+conn = MongoRunner.runMongod({setParameter: {maxSessions: 2}});
+admin = conn.getDB("admin");
- // ensure that the cache is empty
- var serverStatus = assert.commandWorked(admin.adminCommand({serverStatus: 1}));
- assert.eq(0, serverStatus.logicalSessionRecordCache.activeSessionsCount);
+// ensure that the cache is empty
+var serverStatus = assert.commandWorked(admin.adminCommand({serverStatus: 1}));
+assert.eq(0, serverStatus.logicalSessionRecordCache.activeSessionsCount);
- // test that we can run startSession unauthenticated when the server is running without --auth
+// test that we can run startSession unauthenticated when the server is running without --auth
- result = admin.runCommand(request);
- assert.commandWorked(
- result,
- "failed test that we can run startSession unauthenticated when the server is running without --auth");
- assert(result.id, "failed test that our session response has an id");
- assert.eq(
- result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
+result = admin.runCommand(request);
+assert.commandWorked(
+ result,
+ "failed test that we can run startSession unauthenticated when the server is running without --auth");
+assert(result.id, "failed test that our session response has an id");
+assert.eq(result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
- // test that startSession added to the cache
- serverStatus = assert.commandWorked(admin.adminCommand({serverStatus: 1}));
- assert.eq(1, serverStatus.logicalSessionRecordCache.activeSessionsCount);
+// test that startSession added an entry to the cache
+serverStatus = assert.commandWorked(admin.adminCommand({serverStatus: 1}));
+assert.eq(1, serverStatus.logicalSessionRecordCache.activeSessionsCount);
- // test that we can run startSession authenticated when the server is running without --auth
+// test that we can run startSession authenticated when the server is running without --auth
- admin.createUser({user: 'user0', pwd: 'password', roles: []});
- admin.auth("user0", "password");
+admin.createUser({user: 'user0', pwd: 'password', roles: []});
+admin.auth("user0", "password");
- result = admin.runCommand(request);
- assert.commandWorked(
- result,
- "failed test that we can run startSession authenticated when the server is running without --auth");
- assert(result.id, "failed test that our session response has an id");
- assert.eq(
- result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
+result = admin.runCommand(request);
+assert.commandWorked(
+ result,
+ "failed test that we can run startSession authenticated when the server is running without --auth");
+assert(result.id, "failed test that our session response has an id");
+assert.eq(result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
- assert.commandFailed(admin.runCommand(request),
- "failed test that we can't run startSession when the cache is full");
- MongoRunner.stopMongod(conn);
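+// The server was started with maxSessions: 2 and two sessions are already cached, so a third
+// startSession must fail.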
+assert.commandFailed(admin.runCommand(request),
+ "failed test that we can't run startSession when the cache is full");
+MongoRunner.stopMongod(conn);
- //
+//
- conn = MongoRunner.runMongod({auth: "", nojournal: ""});
- admin = conn.getDB("admin");
- foo = conn.getDB("foo");
+conn = MongoRunner.runMongod({auth: "", nojournal: ""});
+admin = conn.getDB("admin");
+foo = conn.getDB("foo");
- // test that we can't run startSession unauthenticated when the server is running with --auth
+// test that we can't run startSession unauthenticated when the server is running with --auth
- assert.commandFailed(
- admin.runCommand(request),
- "failed test that we can't run startSession unauthenticated when the server is running with --auth");
+assert.commandFailed(
+ admin.runCommand(request),
+ "failed test that we can't run startSession unauthenticated when the server is running with --auth");
- //
+//
- admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
- admin.auth("admin", "admin");
- admin.createUser({user: 'user0', pwd: 'password', roles: jsTest.basicUserRoles});
- foo.createUser({user: 'user1', pwd: 'password', roles: jsTest.basicUserRoles});
- admin.createUser({user: 'user2', pwd: 'password', roles: []});
- admin.logout();
+admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
+admin.auth("admin", "admin");
+admin.createUser({user: 'user0', pwd: 'password', roles: jsTest.basicUserRoles});
+foo.createUser({user: 'user1', pwd: 'password', roles: jsTest.basicUserRoles});
+admin.createUser({user: 'user2', pwd: 'password', roles: []});
+admin.logout();
- // test that we can run startSession authenticated as one user with proper permissions
+// test that we can run startSession authenticated as one user with proper permissions
- admin.auth("user0", "password");
- result = admin.runCommand(request);
- assert.commandWorked(
- result,
- "failed test that we can run startSession authenticated as one user with proper permissions");
- assert(result.id, "failed test that our session response has an id");
- assert.eq(
- result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
+admin.auth("user0", "password");
+result = admin.runCommand(request);
+assert.commandWorked(
+ result,
+ "failed test that we can run startSession authenticated as one user with proper permissions");
+assert(result.id, "failed test that our session response has an id");
+assert.eq(result.timeoutMinutes, 30, "failed test that our session record has the correct timeout");
- // test that we cant run startSession authenticated as two users with proper permissions
+// test that we can't run startSession authenticated as two users with proper permissions
- foo.auth("user1", "password");
- assert.commandFailed(
- admin.runCommand(request),
- "failed test that we cant run startSession authenticated as two users with proper permissions");
+foo.auth("user1", "password");
+assert.commandFailed(
+ admin.runCommand(request),
+ "failed test that we cant run startSession authenticated as two users with proper permissions");
- // test that we cant run startSession authenticated as one user without proper permissions
+// test that we can't run startSession authenticated as one user without proper permissions
- admin.logout();
- admin.auth("user2", "password");
- assert.commandFailed(
- admin.runCommand(request),
- "failed test that we cant run startSession authenticated as one user without proper permissions");
+admin.logout();
+admin.auth("user2", "password");
+assert.commandFailed(
+ admin.runCommand(request),
+ "failed test that we cant run startSession authenticated as one user without proper permissions");
- //
-
- MongoRunner.stopMongod(conn);
+//
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/startup_logging.js b/jstests/noPassthrough/startup_logging.js
index d1b73bb1c56..ced10a08d04 100644
--- a/jstests/noPassthrough/startup_logging.js
+++ b/jstests/noPassthrough/startup_logging.js
@@ -4,32 +4,31 @@
(function() {
- 'use strict';
-
- function makeRegExMatchFn(pattern) {
- return function(text) {
- return pattern.test(text);
- };
- }
-
- function testStartupLogging(launcher, matchFn, expectedExitCode) {
- assert(matchFn(rawMongoProgramOutput()));
- }
-
- function validateWaitingMessage(launcher) {
- clearRawMongoProgramOutput();
- var conn = launcher.start({});
- launcher.stop(conn, undefined, {});
- testStartupLogging(launcher, makeRegExMatchFn(/waiting for connections on port/));
- }
-
- print("********************\nTesting startup logging in mongod\n********************");
-
- validateWaitingMessage({
- start: function(opts) {
- return MongoRunner.runMongod(opts);
- },
- stop: MongoRunner.stopMongod
- });
-
+'use strict';
+
+function makeRegExMatchFn(pattern) {
+ return function(text) {
+ return pattern.test(text);
+ };
+}
+
+function testStartupLogging(launcher, matchFn, expectedExitCode) {
+ assert(matchFn(rawMongoProgramOutput()));
+}
+
+function validateWaitingMessage(launcher) {
+ clearRawMongoProgramOutput();
+ var conn = launcher.start({});
+ launcher.stop(conn, undefined, {});
+ testStartupLogging(launcher, makeRegExMatchFn(/waiting for connections on port/));
+}
+
+print("********************\nTesting startup logging in mongod\n********************");
+
+validateWaitingMessage({
+ start: function(opts) {
+ return MongoRunner.runMongod(opts);
+ },
+ stop: MongoRunner.stopMongod
+});
}());
diff --git a/jstests/noPassthrough/step_down_during_drop_database.js b/jstests/noPassthrough/step_down_during_drop_database.js
index 51a21afce76..5480605b1c3 100644
--- a/jstests/noPassthrough/step_down_during_drop_database.js
+++ b/jstests/noPassthrough/step_down_during_drop_database.js
@@ -6,55 +6,55 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const replSet = new ReplSetTest({nodes: 2});
- replSet.startSet();
- replSet.initiate();
+const replSet = new ReplSetTest({nodes: 2});
+replSet.startSet();
+replSet.initiate();
- let primary = replSet.getPrimary();
- let testDB = primary.getDB(dbName);
+let primary = replSet.getPrimary();
+let testDB = primary.getDB(dbName);
- const size = 5;
- jsTest.log("Creating " + size + " test documents.");
- var bulk = testDB.getCollection(collName).initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute());
- replSet.awaitReplication();
+const size = 5;
+jsTest.log("Creating " + size + " test documents.");
+var bulk = testDB.getCollection(collName).initializeUnorderedBulkOp();
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
+}
+assert.writeOK(bulk.execute());
+replSet.awaitReplication();
- const failpoint = "dropDatabaseHangAfterAllCollectionsDrop";
- assert.commandWorked(primary.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+const failpoint = "dropDatabaseHangAfterAllCollectionsDrop";
+assert.commandWorked(primary.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
- // Run the dropDatabase command and stepdown the primary while it is running.
- const awaitShell = startParallelShell(() => {
- db.dropDatabase();
- }, testDB.getMongo().port);
+// Run the dropDatabase command and stepdown the primary while it is running.
+const awaitShell = startParallelShell(() => {
+ db.dropDatabase();
+}, testDB.getMongo().port);
- // Ensure the dropDatabase command has begun before stepping down.
- checkLog.contains(primary,
- "dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop " +
- "enabled. Blocking until fail point is disabled.");
+// Ensure the dropDatabase command has begun before stepping down.
+checkLog.contains(primary,
+ "dropDatabase - fail point dropDatabaseHangAfterAllCollectionsDrop " +
+ "enabled. Blocking until fail point is disabled.");
- assert.commandWorked(testDB.adminCommand({replSetStepDown: 60, force: true}));
- replSet.waitForState(primary, ReplSetTest.State.SECONDARY);
+assert.commandWorked(testDB.adminCommand({replSetStepDown: 60, force: true}));
+replSet.waitForState(primary, ReplSetTest.State.SECONDARY);
- assert.commandWorked(primary.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- awaitShell();
+assert.commandWorked(primary.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+awaitShell();
- primary = replSet.getPrimary();
- testDB = primary.getDB(dbName);
+primary = replSet.getPrimary();
+testDB = primary.getDB(dbName);
- // Run dropDatabase on the new primary. The secondary (formerly the primary) should be able to
- // drop the database too.
- testDB.dropDatabase();
- replSet.awaitReplication();
+// Run dropDatabase on the new primary. The secondary (formerly the primary) should be able to
+// drop the database too.
+testDB.dropDatabase();
+replSet.awaitReplication();
- replSet.stopSet();
+replSet.stopSet();
})();
diff --git a/jstests/noPassthrough/stepdown_query.js b/jstests/noPassthrough/stepdown_query.js
index 6351493bbb4..4e8cc001840 100644
--- a/jstests/noPassthrough/stepdown_query.js
+++ b/jstests/noPassthrough/stepdown_query.js
@@ -8,72 +8,72 @@
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
(function() {
- 'use strict';
+'use strict';
- // Set the refresh period to 10 min to rule out races
- _setShellFailPoint({
- configureFailPoint: "modifyReplicaSetMonitorDefaultRefreshPeriod",
- mode: "alwaysOn",
- data: {
- period: 10 * 60,
- },
- });
-
- var dbName = "test";
- var collName = jsTest.name();
+// Set the refresh period to 10 min to rule out races
+_setShellFailPoint({
+ configureFailPoint: "modifyReplicaSetMonitorDefaultRefreshPeriod",
+ mode: "alwaysOn",
+ data: {
+ period: 10 * 60,
+ },
+});
- function runTest(host, rst, waitForPrimary) {
- // We create a new connection to 'host' here instead of passing in the original connection.
- // This to work around the fact that connections created by ReplSetTest already have slaveOk
- // set on them, but we need a connection with slaveOk not set for this test.
- var conn = new Mongo(host);
- var coll = conn.getDB(dbName).getCollection(collName);
- assert(!coll.exists());
- assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
- var cursor = coll.find().batchSize(2);
- // Retrieve the first batch of results.
- cursor.next();
- cursor.next();
- assert.eq(0, cursor.objsLeftInBatch());
- var primary = rst.getPrimary();
- var secondary = rst.getSecondary();
- assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- if (waitForPrimary) {
- rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
- }
- // When the primary steps down, it closes all client connections. Since 'conn' may be a
- // direct connection to the primary and the shell doesn't automatically retry operations on
- // network errors, we run a dummy operation here to force the shell to reconnect.
- try {
- conn.getDB("admin").runCommand("ping");
- } catch (e) {
- }
+var dbName = "test";
+var collName = jsTest.name();
- // Even though our connection doesn't have slaveOk set, we should still be able to iterate
- // our cursor and kill our cursor.
- assert(cursor.hasNext());
- assert.doesNotThrow(function() {
- cursor.close();
- });
+function runTest(host, rst, waitForPrimary) {
+ // We create a new connection to 'host' here instead of passing in the original connection.
+ // This is to work around the fact that connections created by ReplSetTest already have slaveOk
+ // set on them, but we need a connection with slaveOk not set for this test.
+ var conn = new Mongo(host);
+ var coll = conn.getDB(dbName).getCollection(collName);
+ assert(!coll.exists());
+ assert.writeOK(coll.insert([{}, {}, {}, {}, {}]));
+ var cursor = coll.find().batchSize(2);
+ // Retrieve the first batch of results.
+ cursor.next();
+ cursor.next();
+ assert.eq(0, cursor.objsLeftInBatch());
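+ // The first batch is now exhausted, so iterating the cursor again after the stepdown will
+ // require a getMore against the server.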
+ var primary = rst.getPrimary();
+ var secondary = rst.getSecondary();
+ assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 60, force: true}));
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+ if (waitForPrimary) {
+ rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
+ }
+ // When the primary steps down, it closes all client connections. Since 'conn' may be a
+ // direct connection to the primary and the shell doesn't automatically retry operations on
+ // network errors, we run a dummy operation here to force the shell to reconnect.
+ try {
+ conn.getDB("admin").runCommand("ping");
+ } catch (e) {
}
- // Test querying a replica set primary directly.
- var rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- runTest(rst.getPrimary().host, rst, false);
- rst.stopSet();
+ // Even though our connection doesn't have slaveOk set, we should still be able to iterate
+ // our cursor and kill our cursor.
+ assert(cursor.hasNext());
+ assert.doesNotThrow(function() {
+ cursor.close();
+ });
+}
+
+// Test querying a replica set primary directly.
+var rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+runTest(rst.getPrimary().host, rst, false);
+rst.stopSet();
- rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
- runTest(rst.getURL(), rst, true);
- rst.stopSet();
+rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+runTest(rst.getURL(), rst, true);
+rst.stopSet();
- // Test querying a replica set primary through mongos.
- var st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2});
- rst = st.rs0;
- runTest(st.s0.host, rst, true);
- st.stop();
+// Test querying a replica set primary through mongos.
+var st = new ShardingTest({shards: 1, rs: {nodes: 2}, config: 2});
+rst = st.rs0;
+runTest(st.s0.host, rst, true);
+st.stop();
})();
diff --git a/jstests/noPassthrough/sync_write.js b/jstests/noPassthrough/sync_write.js
index 8908d6bad5e..a4c0d1ebe38 100644
--- a/jstests/noPassthrough/sync_write.js
+++ b/jstests/noPassthrough/sync_write.js
@@ -5,29 +5,29 @@
* @tags: [requires_persistence]
*/
(function() {
- 'use strict';
+'use strict';
- // The following test verifies that writeConcern: {j: true} ensures that data is durable.
- var dbpath = MongoRunner.dataPath + 'sync_write';
- resetDbpath(dbpath);
+// The following test verifies that writeConcern: {j: true} ensures that data is durable.
+var dbpath = MongoRunner.dataPath + 'sync_write';
+resetDbpath(dbpath);
- var mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
+var mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
- // Start a mongod.
- var conn = MongoRunner.runMongod(mongodArgs);
- assert.neq(null, conn, 'mongod was unable to start up');
+// Start a mongod.
+var conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(null, conn, 'mongod was unable to start up');
- // Now connect to the mongod, do a journaled write and abruptly stop the server.
- var testDB = conn.getDB('test');
- assert.writeOK(testDB.synced.insert({synced: true}, {writeConcern: {j: true}}));
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+// Now connect to the mongod, do a journaled write and abruptly stop the server.
+var testDB = conn.getDB('test');
+assert.writeOK(testDB.synced.insert({synced: true}, {writeConcern: {j: true}}));
+MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- // Restart the mongod.
- conn = MongoRunner.runMongod(mongodArgs);
- assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
+// Restart the mongod.
+conn = MongoRunner.runMongod(mongodArgs);
+assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
- // Check that our journaled write still is present.
- testDB = conn.getDB('test');
- assert.eq(1, testDB.synced.count({synced: true}), 'synced write was not found');
- MongoRunner.stopMongod(conn);
+// Check that our journaled write still is present.
+testDB = conn.getDB('test');
+assert.eq(1, testDB.synced.count({synced: true}), 'synced write was not found');
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/system_indexes.js b/jstests/noPassthrough/system_indexes.js
index fb0991b1abb..a1d9365e0a9 100644
--- a/jstests/noPassthrough/system_indexes.js
+++ b/jstests/noPassthrough/system_indexes.js
@@ -1,83 +1,83 @@
-/** Ensure that authorization system collections' indexes are correctly generated.
+/**
+ * Ensure that authorization system collections' indexes are correctly generated.
*
* This test requires users to persist across a restart.
* @tags: [requires_persistence]
*/
(function() {
- let conn = MongoRunner.runMongod();
- let config = conn.getDB("config");
- let db = conn.getDB("admin");
+let conn = MongoRunner.runMongod();
+let config = conn.getDB("config");
+let db = conn.getDB("admin");
- // TEST: User and role collections start off with no indexes
- assert.eq(0, db.system.users.getIndexes().length);
- assert.eq(0, db.system.roles.getIndexes().length);
+// TEST: User and role collections start off with no indexes
+assert.eq(0, db.system.users.getIndexes().length);
+assert.eq(0, db.system.roles.getIndexes().length);
- // TEST: User and role creation generates indexes
- db.createUser({user: "user", pwd: "pwd", roles: []});
- assert.eq(2, db.system.users.getIndexes().length);
+// TEST: User and role creation generates indexes
+db.createUser({user: "user", pwd: "pwd", roles: []});
+assert.eq(2, db.system.users.getIndexes().length);
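+// Two indexes are expected: the implicit _id index plus the unique index on {user: 1, db: 1}.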
- db.createRole({role: "role", privileges: [], roles: []});
- assert.eq(2, db.system.roles.getIndexes().length);
+db.createRole({role: "role", privileges: [], roles: []});
+assert.eq(2, db.system.roles.getIndexes().length);
- // TEST: Destroying admin.system.users index and restarting will recreate it
- assert.commandWorked(db.system.users.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- db = conn.getDB("admin");
- assert.eq(2, db.system.users.getIndexes().length);
- assert.eq(2, db.system.roles.getIndexes().length);
+// TEST: Destroying admin.system.users index and restarting will recreate it
+assert.commandWorked(db.system.users.dropIndexes());
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({restart: conn, cleanData: false});
+db = conn.getDB("admin");
+assert.eq(2, db.system.users.getIndexes().length);
+assert.eq(2, db.system.roles.getIndexes().length);
- // TEST: Destroying admin.system.roles index and restarting will recreate it
- assert.commandWorked(db.system.roles.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- db = conn.getDB("admin");
- assert.eq(2, db.system.users.getIndexes().length);
- assert.eq(2, db.system.roles.getIndexes().length);
+// TEST: Destroying admin.system.roles index and restarting will recreate it
+assert.commandWorked(db.system.roles.dropIndexes());
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({restart: conn, cleanData: false});
+db = conn.getDB("admin");
+assert.eq(2, db.system.users.getIndexes().length);
+assert.eq(2, db.system.roles.getIndexes().length);
- // TEST: Destroying both authorization indexes and restarting will recreate them
- assert.commandWorked(db.system.users.dropIndexes());
- assert.commandWorked(db.system.roles.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- db = conn.getDB("admin");
- assert.eq(2, db.system.users.getIndexes().length);
- assert.eq(2, db.system.roles.getIndexes().length);
+// TEST: Destroying both authorization indexes and restarting will recreate them
+assert.commandWorked(db.system.users.dropIndexes());
+assert.commandWorked(db.system.roles.dropIndexes());
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({restart: conn, cleanData: false});
+db = conn.getDB("admin");
+assert.eq(2, db.system.users.getIndexes().length);
+assert.eq(2, db.system.roles.getIndexes().length);
- // TEST: Destroying the admin.system.users index and restarting will recreate it, even if
- // admin.system.roles does not exist
- // Use _mergeAuthzCollections to clear admin.system.users and admin.system.roles.
- assert.commandWorked(db.adminCommand({
- _mergeAuthzCollections: 1,
- tempUsersCollection: 'admin.tempusers',
- tempRolesCollection: 'admin.temproles',
- db: "",
- drop: true
- }));
- db.createUser({user: "user", pwd: "pwd", roles: []});
- assert.commandWorked(db.system.users.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- db = conn.getDB("admin");
- assert.eq(2, db.system.users.getIndexes().length);
-
- // TEST: Destroying the admin.system.roles index and restarting will recreate it, even if
- // admin.system.users does not exist
- // Use _mergeAuthzCollections to clear admin.system.users and admin.system.roles.
- assert.commandWorked(db.adminCommand({
- _mergeAuthzCollections: 1,
- tempUsersCollection: 'admin.tempusers',
- tempRolesCollection: 'admin.temproles',
- db: "",
- drop: true
- }));
- db.createRole({role: "role", privileges: [], roles: []});
- assert.commandWorked(db.system.roles.dropIndexes());
- MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({restart: conn, cleanData: false});
- db = conn.getDB("admin");
- assert.eq(2, db.system.roles.getIndexes().length);
- MongoRunner.stopMongod(conn);
+// TEST: Destroying the admin.system.users index and restarting will recreate it, even if
+// admin.system.roles does not exist
+// Use _mergeAuthzCollections to clear admin.system.users and admin.system.roles.
+assert.commandWorked(db.adminCommand({
+ _mergeAuthzCollections: 1,
+ tempUsersCollection: 'admin.tempusers',
+ tempRolesCollection: 'admin.temproles',
+ db: "",
+ drop: true
+}));
+db.createUser({user: "user", pwd: "pwd", roles: []});
+assert.commandWorked(db.system.users.dropIndexes());
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({restart: conn, cleanData: false});
+db = conn.getDB("admin");
+assert.eq(2, db.system.users.getIndexes().length);
+// TEST: Destroying the admin.system.roles index and restarting will recreate it, even if
+// admin.system.users does not exist
+// Use _mergeAuthzCollections to clear admin.system.users and admin.system.roles.
+assert.commandWorked(db.adminCommand({
+ _mergeAuthzCollections: 1,
+ tempUsersCollection: 'admin.tempusers',
+ tempRolesCollection: 'admin.temproles',
+ db: "",
+ drop: true
+}));
+db.createRole({role: "role", privileges: [], roles: []});
+assert.commandWorked(db.system.roles.dropIndexes());
+MongoRunner.stopMongod(conn);
+conn = MongoRunner.runMongod({restart: conn, cleanData: false});
+db = conn.getDB("admin");
+assert.eq(2, db.system.roles.getIndexes().length);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js b/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
index e1aa184efa2..0d29b065e7b 100644
--- a/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
+++ b/jstests/noPassthrough/tailable_getmore_to_mongos_does_not_timeout.js
@@ -3,27 +3,26 @@
// This test was designed to reproduce SERVER-33942 against a mongos.
// @tags: [requires_sharding, requires_capped]
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({shards: 2});
+const st = new ShardingTest({shards: 2});
- const db = st.s.getDB("test");
- const coll = db.capped;
- assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 1024}));
- assert.writeOK(coll.insert({}));
- const findResult = assert.commandWorked(
- db.runCommand({find: "capped", filter: {}, tailable: true, awaitData: true}));
+const db = st.s.getDB("test");
+const coll = db.capped;
+assert.commandWorked(db.runCommand({create: "capped", capped: true, size: 1024}));
+assert.writeOK(coll.insert({}));
+const findResult = assert.commandWorked(
+ db.runCommand({find: "capped", filter: {}, tailable: true, awaitData: true}));
- const cursorId = findResult.cursor.id;
- assert.neq(cursorId, 0);
+const cursorId = findResult.cursor.id;
+assert.neq(cursorId, 0);
- // Test that the getMores on this tailable cursor are immune to interrupt.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
- assert.commandWorked(db.runCommand({getMore: cursorId, collection: "capped", maxTimeMS: 30}));
- assert.commandWorked(db.runCommand({getMore: cursorId, collection: "capped"}));
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
+// Test that the getMores on this tailable cursor are immune to interrupt.
+assert.commandWorked(
+ db.adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
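+// With maxTimeAlwaysTimeOut enabled, operations that enforce a time limit fail immediately, so
+// these successful getMores show the tailable cursor's awaitData wait is exempt from it.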
+assert.commandWorked(db.runCommand({getMore: cursorId, collection: "capped", maxTimeMS: 30}));
+assert.commandWorked(db.runCommand({getMore: cursorId, collection: "capped"}));
+assert.commandWorked(db.adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
- st.stop();
+st.stop();
}());
diff --git a/jstests/noPassthrough/thread_args.js b/jstests/noPassthrough/thread_args.js
index b633acb1081..03273d76af3 100644
--- a/jstests/noPassthrough/thread_args.js
+++ b/jstests/noPassthrough/thread_args.js
@@ -2,45 +2,45 @@
 * This test makes sure Thread and ScopedThread work with --enableJavaScriptProtection
*/
(function() {
- 'use strict';
- load('jstests/libs/parallelTester.js');
+'use strict';
+load('jstests/libs/parallelTester.js');
- function testThread(threadType) {
- function threadFn(args) {
- // Ensure objects are passed through properly
- assert(args instanceof Object);
- // Ensure functions inside objects are still functions
- assert(args.func1 instanceof Function);
- assert(args.func1());
- // Ensure Code objects are converted to functions
- assert(args.func2 instanceof Function);
- assert(args.func2());
- // Ensure arrays are passed through properly
- assert(args.funcArray instanceof Array);
- // Ensure functions inside arrays are still functions.
- assert(args.funcArray[0] instanceof Function);
- assert(args.funcArray[0]());
- return true;
- }
+function testThread(threadType) {
+ function threadFn(args) {
+ // Ensure objects are passed through properly
+ assert(args instanceof Object);
+ // Ensure functions inside objects are still functions
+ assert(args.func1 instanceof Function);
+ assert(args.func1());
+ // Ensure Code objects are converted to functions
+ assert(args.func2 instanceof Function);
+ assert(args.func2());
+ // Ensure arrays are passed through properly
+ assert(args.funcArray instanceof Array);
+ // Ensure functions inside arrays are still functions.
+ assert(args.funcArray[0] instanceof Function);
+ assert(args.funcArray[0]());
+ return true;
+ }
- function returnTrue() {
- return true;
- }
+ function returnTrue() {
+ return true;
+ }
- var args = {
- func1: returnTrue,
- // Pass some Code objects to simulate what happens with --enableJavaScriptProtection
- func2: new Code(returnTrue.toString()),
- funcArray: [new Code(returnTrue.toString())]
- };
+ var args = {
+ func1: returnTrue,
+ // Pass some Code objects to simulate what happens with --enableJavaScriptProtection
+ func2: new Code(returnTrue.toString()),
+ funcArray: [new Code(returnTrue.toString())]
+ };
- var thread = new threadType(threadFn, args);
- thread.start();
- thread.join();
- assert(thread.returnData());
- }
+ var thread = new threadType(threadFn, args);
+ thread.start();
+ thread.join();
+ assert(thread.returnData());
+}
- // Test both Thread and ScopedThread
- testThread(Thread);
- testThread(ScopedThread);
+// Test both Thread and ScopedThread
+testThread(Thread);
+testThread(ScopedThread);
}());
diff --git a/jstests/noPassthrough/timestamp_index_builds.js b/jstests/noPassthrough/timestamp_index_builds.js
index e5ffa405d45..41f5ecfb42c 100644
--- a/jstests/noPassthrough/timestamp_index_builds.js
+++ b/jstests/noPassthrough/timestamp_index_builds.js
@@ -18,84 +18,82 @@
* @tags: [requires_replication, requires_persistence, requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({
- name: "timestampingIndexBuilds",
- nodes: 2,
- nodeOptions:
- {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
- });
- const nodes = rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({
+ name: "timestampingIndexBuilds",
+ nodes: 2,
+ nodeOptions: {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
+});
+const nodes = rst.startSet();
+rst.initiate();
- if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
- // Only snapshotting storage engines require correct timestamping of index builds.
- rst.stopSet();
- return;
- }
+if (!rst.getPrimary().adminCommand("serverStatus").storageEngine.supportsSnapshotReadConcern) {
+ // Only snapshotting storage engines require correct timestamping of index builds.
+ rst.stopSet();
+ return;
+}
- function getColl(conn) {
- return conn.getDB("timestampingIndexBuild")["coll"];
- }
+function getColl(conn) {
+ return conn.getDB("timestampingIndexBuild")["coll"];
+}
- let coll = getColl(rst.getPrimary());
+let coll = getColl(rst.getPrimary());
- // Create a collection and wait for the stable timestamp to exceed its creation on both nodes.
- assert.commandWorked(
- coll.insert({}, {writeConcern: {w: "majority", wtimeout: rst.kDefaultTimeoutMS}}));
+// Create a collection and wait for the stable timestamp to exceed its creation on both nodes.
+assert.commandWorked(
+ coll.insert({}, {writeConcern: {w: "majority", wtimeout: rst.kDefaultTimeoutMS}}));
- // Wait for the stable timestamp to match the latest oplog entry on both nodes.
- rst.awaitLastOpCommitted();
+// Wait for the stable timestamp to match the latest oplog entry on both nodes.
+rst.awaitLastOpCommitted();
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- nodes.forEach(node => assert.commandWorked(node.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+nodes.forEach(node => assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
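+// With snapshotting disabled, the index build below is replicated but never becomes part of a
+// stable checkpoint, so a later standalone start (which skips oplog recovery) will not see it.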
- assert.commandWorked(coll.createIndexes([{foo: 1}], {background: true}));
- rst.awaitReplication();
+assert.commandWorked(coll.createIndexes([{foo: 1}], {background: true}));
+rst.awaitReplication();
- rst.stopSet(undefined, true);
+rst.stopSet(undefined, true);
- // The `disableSnapshotting` failpoint is no longer in effect. Bring up and analyze each node
- // separately. The client does not need to perform any writes from here on out.
- for (let nodeIdx = 0; nodeIdx < 2; ++nodeIdx) {
- let node = nodes[nodeIdx];
- let nodeIdentity = tojsononeline({nodeIdx: nodeIdx, dbpath: node.dbpath, port: node.port});
+// The `disableSnapshotting` failpoint is no longer in effect. Bring up and analyze each node
+// separately. The client does not need to perform any writes from here on out.
+for (let nodeIdx = 0; nodeIdx < 2; ++nodeIdx) {
+ let node = nodes[nodeIdx];
+ let nodeIdentity = tojsononeline({nodeIdx: nodeIdx, dbpath: node.dbpath, port: node.port});
- // Bringing up the node as a standalone should only find the `_id` index.
- {
- jsTestLog("Starting as a standalone. Ensure only the `_id` index exists. Node: " +
- nodeIdentity);
- let conn = rst.start(nodeIdx, {noReplSet: true, noCleanData: true});
- assert.neq(null, conn, "failed to restart node");
- assert.eq(1, getColl(conn).getIndexes().length);
- rst.stop(nodeIdx);
- }
+ // Bringing up the node as a standalone should only find the `_id` index.
+ {
+ jsTestLog("Starting as a standalone. Ensure only the `_id` index exists. Node: " +
+ nodeIdentity);
+ let conn = rst.start(nodeIdx, {noReplSet: true, noCleanData: true});
+ assert.neq(null, conn, "failed to restart node");
+ assert.eq(1, getColl(conn).getIndexes().length);
+ rst.stop(nodeIdx);
+ }
- // Bringing up the node with `--replSet` will run oplog recovery. The `foo` index will be
- // rebuilt, but not become "stable".
- {
- jsTestLog("Starting as a replica set. Both indexes should exist. Node: " +
- nodeIdentity);
- let conn = rst.start(nodeIdx, {startClean: false}, true);
- conn.setSlaveOk();
- assert.eq(2, getColl(conn).getIndexes().length);
- rst.stop(nodeIdx);
- }
+ // Bringing up the node with `--replSet` will run oplog recovery. The `foo` index will be
+ // rebuilt, but not become "stable".
+ {
+ jsTestLog("Starting as a replica set. Both indexes should exist. Node: " + nodeIdentity);
+ let conn = rst.start(nodeIdx, {startClean: false}, true);
+ conn.setSlaveOk();
+ assert.eq(2, getColl(conn).getIndexes().length);
+ rst.stop(nodeIdx);
+ }
- // Restarting the node as a standalone once again only shows the `_id` index.
- {
- jsTestLog(
- "Starting as a standalone after replication startup recovery. Ensure only the `_id` index exists. Node: " +
- nodeIdentity);
- let conn = rst.start(nodeIdx, {noReplSet: true, noCleanData: true});
- assert.neq(null, conn, "failed to restart node");
- assert.eq(1, getColl(conn).getIndexes().length);
- rst.stop(nodeIdx);
- }
+ // Restarting the node as a standalone once again only shows the `_id` index.
+ {
+ jsTestLog(
+ "Starting as a standalone after replication startup recovery. Ensure only the `_id` index exists. Node: " +
+ nodeIdentity);
+ let conn = rst.start(nodeIdx, {noReplSet: true, noCleanData: true});
+ assert.neq(null, conn, "failed to restart node");
+ assert.eq(1, getColl(conn).getIndexes().length);
+ rst.stop(nodeIdx);
}
+}
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/traffic_reading.js b/jstests/noPassthrough/traffic_reading.js
index d9569868002..aa29d360387 100644
--- a/jstests/noPassthrough/traffic_reading.js
+++ b/jstests/noPassthrough/traffic_reading.js
@@ -1,85 +1,83 @@
// tests for the traffic_recording commands.
(function() {
- // Variables for this test
- const recordingDir = MongoRunner.toRealDir("$dataDir/traffic_recording/");
- const recordingFile = "recording.txt";
- const recordingFilePath = MongoRunner.toRealDir(recordingDir + "/" + recordingFile);
- const replayFilePath = MongoRunner.toRealDir(recordingDir + "/replay.txt");
+// Variables for this test
+const recordingDir = MongoRunner.toRealDir("$dataDir/traffic_recording/");
+const recordingFile = "recording.txt";
+const recordingFilePath = MongoRunner.toRealDir(recordingDir + "/" + recordingFile);
+const replayFilePath = MongoRunner.toRealDir(recordingDir + "/replay.txt");
- assert.throws(function() {
- convertTrafficRecordingToBSON("notarealfileatall");
- });
+assert.throws(function() {
+ convertTrafficRecordingToBSON("notarealfileatall");
+});
- // Create the recording directory if it does not already exist
- mkdir(recordingDir);
+// Create the recording directory if it does not already exist
+mkdir(recordingDir);
- // Create the options and run mongod
- var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir};
- m = MongoRunner.runMongod(opts);
+// Create the options and run mongod
+var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir};
+m = MongoRunner.runMongod(opts);
- // Get the port of the host
- var serverPort = m.port;
+// Get the port of the host
+var serverPort = m.port;
- // Create necessary users
- adminDB = m.getDB("admin");
- const testDB = m.getDB("test");
- const coll = testDB.getCollection("foo");
- adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
- adminDB.auth("admin", "pass");
+// Create necessary users
+adminDB = m.getDB("admin");
+const testDB = m.getDB("test");
+const coll = testDB.getCollection("foo");
+adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
+adminDB.auth("admin", "pass");
- // Start recording traffic
- assert.commandWorked(
- adminDB.runCommand({'startRecordingTraffic': 1, 'filename': 'recording.txt'}));
+// Start recording traffic
+assert.commandWorked(adminDB.runCommand({'startRecordingTraffic': 1, 'filename': 'recording.txt'}));
- // Run a few commands
- assert.commandWorked(testDB.runCommand({"serverStatus": 1}));
- assert.commandWorked(coll.insert({"name": "foo biz bar"}));
- assert.eq("foo biz bar", coll.findOne().name);
- assert.commandWorked(coll.insert({"name": "foo bar"}));
- assert.eq("foo bar", coll.findOne({"name": "foo bar"}).name);
- assert.commandWorked(coll.deleteOne({}));
- assert.eq(1, coll.aggregate().toArray().length);
- assert.commandWorked(coll.update({}, {}));
+// Run a few commands
+assert.commandWorked(testDB.runCommand({"serverStatus": 1}));
+assert.commandWorked(coll.insert({"name": "foo biz bar"}));
+assert.eq("foo biz bar", coll.findOne().name);
+assert.commandWorked(coll.insert({"name": "foo bar"}));
+assert.eq("foo bar", coll.findOne({"name": "foo bar"}).name);
+assert.commandWorked(coll.deleteOne({}));
+assert.eq(1, coll.aggregate().toArray().length);
+assert.commandWorked(coll.update({}, {}));
- // Stop recording traffic
- assert.commandWorked(testDB.runCommand({'stopRecordingTraffic': 1}));
+// Stop recording traffic
+assert.commandWorked(testDB.runCommand({'stopRecordingTraffic': 1}));
- // Shutdown Mongod
- MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'password'});
+// Shutdown Mongod
+MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
- // Counters
- var numRequest = 0;
- var numResponse = 0;
- var opTypes = {};
+// Counters
+var numRequest = 0;
+var numResponse = 0;
+var opTypes = {};
- // Pass filepath to traffic_reader helper method to get recorded info in BSON
- var res = convertTrafficRecordingToBSON(recordingFilePath);
+// Pass filepath to traffic_reader helper method to get recorded info in BSON
+var res = convertTrafficRecordingToBSON(recordingFilePath);
- // Iterate through the results and assert the above commands are properly recorded
- res.forEach((obj) => {
- assert.eq(obj["rawop"]["header"]["opcode"], 2013);
- assert.eq(obj["seenconnectionnum"], 1);
- var responseTo = obj["rawop"]["header"]["responseto"];
- if (responseTo == 0) {
- assert.eq(obj["destendpoint"], serverPort.toString());
- numRequest++;
- } else {
- assert.eq(obj["srcendpoint"], serverPort.toString());
- numResponse++;
- }
- opTypes[obj["opType"]] = (opTypes[obj["opType"]] || 0) + 1;
- });
+// Iterate through the results and assert the above commands are properly recorded
+res.forEach((obj) => {
+ assert.eq(obj["rawop"]["header"]["opcode"], 2013);
+ assert.eq(obj["seenconnectionnum"], 1);
+ var responseTo = obj["rawop"]["header"]["responseto"];
+ if (responseTo == 0) {
+ assert.eq(obj["destendpoint"], serverPort.toString());
+ numRequest++;
+ } else {
+ assert.eq(obj["srcendpoint"], serverPort.toString());
+ numResponse++;
+ }
+ opTypes[obj["opType"]] = (opTypes[obj["opType"]] || 0) + 1;
+});
- // Assert there is a response for every request
- assert.eq(numResponse, numRequest);
-
- // Assert the opTypes were correct
- assert.eq(opTypes['isMaster'], opTypes["ismaster"]);
- assert.eq(opTypes['find'], 2);
- assert.eq(opTypes['insert'], 2);
- assert.eq(opTypes['delete'], 1);
- assert.eq(opTypes['update'], 1);
- assert.eq(opTypes['aggregate'], 1);
- assert.eq(opTypes['stopRecordingTraffic'], 1);
+// Assert there is a response for every request
+assert.eq(numResponse, numRequest);
+// Assert the opTypes were correct
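+// (two finds from the findOne calls, two inserts, one delete, one update, and one aggregate above).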
+assert.eq(opTypes['isMaster'], opTypes["ismaster"]);
+assert.eq(opTypes['find'], 2);
+assert.eq(opTypes['insert'], 2);
+assert.eq(opTypes['delete'], 1);
+assert.eq(opTypes['update'], 1);
+assert.eq(opTypes['aggregate'], 1);
+assert.eq(opTypes['stopRecordingTraffic'], 1);
})();
diff --git a/jstests/noPassthrough/traffic_reading_legacy.js b/jstests/noPassthrough/traffic_reading_legacy.js
index 9224edf926a..69cda3be58f 100644
--- a/jstests/noPassthrough/traffic_reading_legacy.js
+++ b/jstests/noPassthrough/traffic_reading_legacy.js
@@ -1,72 +1,70 @@
// tests for the traffic_recording commands.
(function() {
- var baseName = "jstests_traffic_recording";
+var baseName = "jstests_traffic_recording";
- // Variables for this test
- const recordingDir = MongoRunner.toRealDir("$dataDir/traffic_recording/");
- const recordingFile = "recording.txt";
- const recordingFilePath = MongoRunner.toRealDir(recordingDir + "/" + recordingFile);
+// Variables for this test
+const recordingDir = MongoRunner.toRealDir("$dataDir/traffic_recording/");
+const recordingFile = "recording.txt";
+const recordingFilePath = MongoRunner.toRealDir(recordingDir + "/" + recordingFile);
- // Create the recording directory if it does not already exist
- mkdir(recordingDir);
+// Create the recording directory if it does not already exist
+mkdir(recordingDir);
- // Create the options and run mongod
- var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir};
- m = MongoRunner.runMongod(opts);
+// Create the options and run mongod
+var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir};
+m = MongoRunner.runMongod(opts);
- // Get the port of the host
- var serverPort = m.port;
+// Get the port of the host
+var serverPort = m.port;
- // Set the readMode and writeMode to legacy
- m.forceReadMode("legacy");
- m.forceWriteMode("legacy");
+// Set the readMode and writeMode to legacy
+m.forceReadMode("legacy");
+m.forceWriteMode("legacy");
- // Create necessary users
- adminDB = m.getDB("admin");
- const testDB = m.getDB("test");
- const coll = testDB.getCollection("foo");
- adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
- adminDB.auth("admin", "pass");
+// Create necessary users
+adminDB = m.getDB("admin");
+const testDB = m.getDB("test");
+const coll = testDB.getCollection("foo");
+adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
+adminDB.auth("admin", "pass");
- // Start recording traffic
- assert.commandWorked(
- adminDB.runCommand({'startRecordingTraffic': 1, 'filename': 'recording.txt'}));
+// Start recording traffic
+assert.commandWorked(adminDB.runCommand({'startRecordingTraffic': 1, 'filename': 'recording.txt'}));
- // Run a few commands
- testDB.runCommand({"serverStatus": 1});
- coll.insert({"name": "foo biz bar"});
- coll.findOne();
- coll.insert({"name": "foo bar"});
- coll.findOne({"name": "foo bar"});
- coll.deleteOne({});
+// Run a few commands
+testDB.runCommand({"serverStatus": 1});
+coll.insert({"name": "foo biz bar"});
+coll.findOne();
+coll.insert({"name": "foo bar"});
+coll.findOne({"name": "foo bar"});
+coll.deleteOne({});
- // Stop recording traffic
- assert.commandWorked(testDB.runCommand({'stopRecordingTraffic': 1}));
+// Stop recording traffic
+assert.commandWorked(testDB.runCommand({'stopRecordingTraffic': 1}));
- // Shutdown Mongod
- MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'password'});
+// Shutdown Mongod
+MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
- // Counters
- var opCodes = {};
+// Counters
+var opCodes = {};
- // Pass filepath to traffic_reader helper method to get recorded info in BSON
- var res = convertTrafficRecordingToBSON(recordingFilePath);
+// Pass filepath to traffic_reader helper method to get recorded info in BSON
+var res = convertTrafficRecordingToBSON(recordingFilePath);
- // Iterate through the results and assert the above commands are properly recorded
- res.forEach((obj) => {
- opCodes[obj["rawop"]["header"]["opcode"]] =
- (opCodes[obj["rawop"]["header"]["opcode"]] || 0) + 1;
- assert.eq(obj["seenconnectionnum"], 1);
- var responseTo = obj["rawop"]["header"]["responseto"];
- if (responseTo == 0) {
- assert.eq(obj["destendpoint"], serverPort.toString());
- } else {
- assert.eq(obj["srcendpoint"], serverPort.toString());
- }
- });
-
- // ensure legacy operations worked properly
- assert.eq(opCodes[2002], 2);
- assert.eq(opCodes[2006], 1);
+// Iterate through the results and assert the above commands are properly recorded
+res.forEach((obj) => {
+ opCodes[obj["rawop"]["header"]["opcode"]] =
+ (opCodes[obj["rawop"]["header"]["opcode"]] || 0) + 1;
+ assert.eq(obj["seenconnectionnum"], 1);
+ var responseTo = obj["rawop"]["header"]["responseto"];
+ if (responseTo == 0) {
+ assert.eq(obj["destendpoint"], serverPort.toString());
+ } else {
+ assert.eq(obj["srcendpoint"], serverPort.toString());
+ }
+});
+// ensure legacy operations worked properly
+assert.eq(opCodes[2002], 2);
+assert.eq(opCodes[2006], 1);
})();
diff --git a/jstests/noPassthrough/traffic_recording.js b/jstests/noPassthrough/traffic_recording.js
index 03828809a81..e748deb7e7b 100644
--- a/jstests/noPassthrough/traffic_recording.js
+++ b/jstests/noPassthrough/traffic_recording.js
@@ -1,126 +1,125 @@
// tests for the traffic_recording commands.
(function() {
- function getDB(client) {
- let db = client.getDB("admin");
- db.auth("admin", "pass");
+function getDB(client) {
+ let db = client.getDB("admin");
+ db.auth("admin", "pass");
- return db;
- }
+ return db;
+}
- function runTest(client, restartCommand) {
- let db = getDB(client);
-
- let res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
- assert.eq(res.ok, false);
- assert.eq(res["errmsg"], "Traffic recording directory not set");
-
- const path = MongoRunner.toRealDir("$dataDir/traffic_recording/");
- mkdir(path);
-
- if (!jsTest.isMongos(client)) {
- setJsTestOption("enableTestCommands", 0);
- client = restartCommand({
- trafficRecordingDirectory: path,
- AlwaysRecordTraffic: "notARealPath",
- enableTestCommands: 0,
- });
- setJsTestOption("enableTestCommands", 1);
- assert.eq(null, client, "AlwaysRecordTraffic and not enableTestCommands should fail");
- }
+function runTest(client, restartCommand) {
+ let db = getDB(client);
+
+ let res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
+ assert.eq(res.ok, false);
+ assert.eq(res["errmsg"], "Traffic recording directory not set");
+
+ const path = MongoRunner.toRealDir("$dataDir/traffic_recording/");
+ mkdir(path);
+ if (!jsTest.isMongos(client)) {
+ setJsTestOption("enableTestCommands", 0);
client = restartCommand({
trafficRecordingDirectory: path,
AlwaysRecordTraffic: "notARealPath",
- enableTestCommands: 1
+ enableTestCommands: 0,
});
- assert.neq(null, client, "AlwaysRecordTraffic and with enableTestCommands should suceed");
- db = getDB(client);
-
- assert(db.runCommand({"serverStatus": 1}).trafficRecording.running);
-
- client = restartCommand({trafficRecordingDirectory: path});
- db = getDB(client);
-
- res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
- assert.eq(res.ok, true);
-
- // Running the command again should fail
- res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
- assert.eq(res.ok, false);
- assert.eq(res["errmsg"], "Traffic recording already active");
-
- // Running the serverStatus command should return the relevant information
- res = db.runCommand({"serverStatus": 1});
- assert("trafficRecording" in res);
- let trafficStats = res["trafficRecording"];
- assert.eq(trafficStats["running"], true);
-
- // Assert that the current file size is growing
- res = db.runCommand({"serverStatus": 1});
- assert("trafficRecording" in res);
- let trafficStats2 = res["trafficRecording"];
- assert.eq(trafficStats2["running"], true);
- assert(trafficStats2["currentFileSize"] >= trafficStats["currentFileSize"]);
-
- // Running the stopRecordingTraffic command should succeed
- res = db.runCommand({'stopRecordingTraffic': 1});
- assert.eq(res.ok, true);
-
- // Running the stopRecordingTraffic command again should fail
- res = db.runCommand({'stopRecordingTraffic': 1});
- assert.eq(res.ok, false);
- assert.eq(res["errmsg"], "Traffic recording not active");
-
- // Running the serverStatus command should return running is false
- res = db.runCommand({"serverStatus": 1});
- assert("trafficRecording" in res);
- trafficStats = res["trafficRecording"];
- assert.eq(trafficStats["running"], false);
-
- return client;
+ setJsTestOption("enableTestCommands", 1);
+ assert.eq(null, client, "AlwaysRecordTraffic without enableTestCommands should fail");
}
- {
- let m = MongoRunner.runMongod({auth: ""});
-
- let db = m.getDB("admin");
-
- db.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
- db.auth("admin", "pass");
+ client = restartCommand({
+ trafficRecordingDirectory: path,
+ AlwaysRecordTraffic: "notARealPath",
+ enableTestCommands: 1
+ });
+ assert.neq(null, client, "AlwaysRecordTraffic with enableTestCommands should succeed");
+ db = getDB(client);
+
+ assert(db.runCommand({"serverStatus": 1}).trafficRecording.running);
+
+ client = restartCommand({trafficRecordingDirectory: path});
+ db = getDB(client);
+
+ res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
+ assert.eq(res.ok, true);
+
+ // Running the command again should fail
+ res = db.runCommand({'startRecordingTraffic': 1, 'filename': 'notARealPath'});
+ assert.eq(res.ok, false);
+ assert.eq(res["errmsg"], "Traffic recording already active");
+
+ // Running the serverStatus command should return the relevant information
+ res = db.runCommand({"serverStatus": 1});
+ assert("trafficRecording" in res);
+ let trafficStats = res["trafficRecording"];
+ assert.eq(trafficStats["running"], true);
+
+ // Assert that the current file size is growing
+ res = db.runCommand({"serverStatus": 1});
+ assert("trafficRecording" in res);
+ let trafficStats2 = res["trafficRecording"];
+ assert.eq(trafficStats2["running"], true);
+ assert(trafficStats2["currentFileSize"] >= trafficStats["currentFileSize"]);
+
+ // Running the stopRecordingTraffic command should succeed
+ res = db.runCommand({'stopRecordingTraffic': 1});
+ assert.eq(res.ok, true);
+
+ // Running the stopRecordingTraffic command again should fail
+ res = db.runCommand({'stopRecordingTraffic': 1});
+ assert.eq(res.ok, false);
+ assert.eq(res["errmsg"], "Traffic recording not active");
+
+ // Running the serverStatus command should return running is false
+ res = db.runCommand({"serverStatus": 1});
+ assert("trafficRecording" in res);
+ trafficStats = res["trafficRecording"];
+ assert.eq(trafficStats["running"], false);
+
+ return client;
+}
+
+{
+ let m = MongoRunner.runMongod({auth: ""});
+
+ let db = m.getDB("admin");
+
+ db.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
+ db.auth("admin", "pass");
+
+ m = runTest(m, function(setParams) {
+ if (m) {
+ MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
+ }
+ m = MongoRunner.runMongod({auth: "", setParameter: setParams});
- m = runTest(m, function(setParams) {
- if (m) {
- MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
- }
- m = MongoRunner.runMongod({auth: "", setParameter: setParams});
+ if (m) {
+ m.getDB("admin").createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
+ }
- if (m) {
- m.getDB("admin").createUser(
- {user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
- }
+ return m;
+ });
- return m;
- });
+ MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
+}
- MongoRunner.stopMongod(m, null, {user: 'admin', pwd: 'pass'});
- }
+{
+ let shardTest = new ShardingTest({
+ config: 1,
+ mongos: 1,
+ shards: 0,
+ });
- {
- let shardTest = new ShardingTest({
- config: 1,
- mongos: 1,
- shards: 0,
+ runTest(shardTest.s, function(setParams) {
+ shardTest.restartMongos(0, {
+ restart: true,
+ setParameter: setParams,
});
- runTest(shardTest.s, function(setParams) {
- shardTest.restartMongos(0, {
- restart: true,
- setParameter: setParams,
- });
+ return shardTest.s;
+ });
- return shardTest.s;
- });
-
- shardTest.stop();
- }
+ shardTest.stop();
+}
})();
diff --git a/jstests/noPassthrough/transactionLifetimeLimitSeconds_serverParameter.js b/jstests/noPassthrough/transactionLifetimeLimitSeconds_serverParameter.js
index 6477fc8a4ca..82dba9f08be 100644
--- a/jstests/noPassthrough/transactionLifetimeLimitSeconds_serverParameter.js
+++ b/jstests/noPassthrough/transactionLifetimeLimitSeconds_serverParameter.js
@@ -2,20 +2,20 @@
// startup and via setParameter command. Valid parameter values are in the range [1, infinity).
(function() {
- 'use strict';
+'use strict';
- load("jstests/noPassthrough/libs/server_parameter_helpers.js");
+load("jstests/noPassthrough/libs/server_parameter_helpers.js");
- // transactionLifetimeLimitSeconds is set to be higher than its default value in test suites.
- delete TestData.transactionLifetimeLimitSeconds;
+// transactionLifetimeLimitSeconds is set to be higher than its default value in test suites.
+delete TestData.transactionLifetimeLimitSeconds;
- testNumericServerParameter("transactionLifetimeLimitSeconds",
- true /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 60 /*defaultValue*/,
- 30 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+testNumericServerParameter("transactionLifetimeLimitSeconds",
+ true /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 60 /*defaultValue*/,
+ 30 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
})();
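A hedged sketch of what the helper asserts in practice for this parameter, assuming a throwaway mongod started by MongoRunner:

// The parameter is accepted at startup with any value >= 1.
const conn = MongoRunner.runMongod({setParameter: {transactionLifetimeLimitSeconds: 30}});
const admin = conn.getDB("admin");
const got = assert.commandWorked(
    admin.runCommand({getParameter: 1, transactionLifetimeLimitSeconds: 1}));
assert.eq(30, got.transactionLifetimeLimitSeconds);

// It is also a runtime parameter, but values below the lower bound of 1 are rejected.
assert.commandWorked(admin.runCommand({setParameter: 1, transactionLifetimeLimitSeconds: 120}));
assert.commandFailed(admin.runCommand({setParameter: 1, transactionLifetimeLimitSeconds: 0}));

MongoRunner.stopMongod(conn);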
diff --git a/jstests/noPassthrough/transaction_reaper.js b/jstests/noPassthrough/transaction_reaper.js
index b0574c099c7..5f0536f0d7e 100644
--- a/jstests/noPassthrough/transaction_reaper.js
+++ b/jstests/noPassthrough/transaction_reaper.js
@@ -1,167 +1,166 @@
// @tags: [requires_replication, requires_sharding]
(function() {
- 'use strict';
+'use strict';
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- function Repl(lifetime) {
- this.rst = new ReplSetTest({
- nodes: 1,
- nodeOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: lifetime}},
- });
- this.rst.startSet();
- this.rst.initiate();
- }
+function Repl(lifetime) {
+ this.rst = new ReplSetTest({
+ nodes: 1,
+ nodeOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: lifetime}},
+ });
+ this.rst.startSet();
+ this.rst.initiate();
+}
+
+Repl.prototype.stop = function() {
+ this.rst.stopSet();
+};
+
+Repl.prototype.getConn = function() {
+ return this.rst.getPrimary();
+};
+
+Repl.prototype.getTransactionConn = function() {
+ return this.rst.getPrimary();
+};
+
+function Sharding(lifetime) {
+ this.st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ other: {
+ rs: true,
+ rsOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: lifetime}},
+ rs0: {nodes: 1},
+ },
+ });
+
+ this.st.s0.getDB("admin").runCommand({enableSharding: "test"});
+ this.st.s0.getDB("admin").runCommand({shardCollection: "test.test", key: {_id: 1}});
+
+ // Ensure that the sessions collection exists.
+ assert.commandWorked(this.st.c0.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(
+ this.st.rs0.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
+}
+
+Sharding.prototype.stop = function() {
+ this.st.stop();
+};
+
+Sharding.prototype.getConn = function() {
+ return this.st.s0;
+};
+
+Sharding.prototype.getTransactionConn = function() {
+ return this.st.rs0.getPrimary();
+};
- Repl.prototype.stop = function() {
- this.rst.stopSet();
- };
-
- Repl.prototype.getConn = function() {
- return this.rst.getPrimary();
- };
-
- Repl.prototype.getTransactionConn = function() {
- return this.rst.getPrimary();
- };
-
- function Sharding(lifetime) {
- this.st = new ShardingTest({
- shards: 1,
- mongos: 1,
- config: 1,
- other: {
- rs: true,
- rsOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: lifetime}},
- rs0: {nodes: 1},
- },
- });
-
- this.st.s0.getDB("admin").runCommand({enableSharding: "test"});
- this.st.s0.getDB("admin").runCommand({shardCollection: "test.test", key: {_id: 1}});
-
- // Ensure that the sessions collection exists.
- assert.commandWorked(
- this.st.c0.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
- assert.commandWorked(
- this.st.rs0.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
+const nSessions = 1500;
+
+function Fixture(impl) {
+ this.impl = impl;
+ this.conn = impl.getConn();
+ this.transactionConn = impl.getTransactionConn();
+
+ this.sessions = [];
+
+ for (var i = 0; i < nSessions; i++) {
+ // Make a session and use it so it ends up in the sessions collection after a refresh.
+ var session = this.conn.startSession({retryWrites: 1});
+ session.getDatabase("test").test.count({});
+ this.sessions.push(session);
}
- Sharding.prototype.stop = function() {
- this.st.stop();
- };
-
- Sharding.prototype.getConn = function() {
- return this.st.s0;
- };
-
- Sharding.prototype.getTransactionConn = function() {
- return this.st.rs0.getPrimary();
- };
-
- const nSessions = 1500;
-
- function Fixture(impl) {
- this.impl = impl;
- this.conn = impl.getConn();
- this.transactionConn = impl.getTransactionConn();
-
- this.sessions = [];
-
- for (var i = 0; i < nSessions; i++) {
- // make a session and get it to the collection
- var session = this.conn.startSession({retryWrites: 1});
- session.getDatabase("test").test.count({});
- this.sessions.push(session);
- }
-
- this.refresh();
- this.assertOutstandingTransactions(0);
- this.assertOutstandingSessions(nSessions);
-
- for (var i = 0; i < nSessions; i++) {
- // make a session and get it to the collection
- var session = this.sessions[i];
- assert.writeOK(session.getDatabase("test").test.save({a: 1}));
- }
-
- // Ensure a write flushes a transaction
- this.assertOutstandingTransactions(nSessions);
- this.assertOutstandingSessions(nSessions);
-
- // Ensure a refresh/reap doesn't remove the transaction
- this.refresh();
- this.reap();
- this.assertOutstandingTransactions(nSessions);
- this.assertOutstandingSessions(nSessions);
+ this.refresh();
+ this.assertOutstandingTransactions(0);
+ this.assertOutstandingSessions(nSessions);
+
+ for (var i = 0; i < nSessions; i++) {
+ // Perform a retryable write with each session so it creates a transaction record.
+ var session = this.sessions[i];
+ assert.writeOK(session.getDatabase("test").test.save({a: 1}));
}
- Fixture.prototype.assertOutstandingTransactions = function(count) {
- assert.eq(count, this.transactionConn.getDB("config").transactions.count());
- };
-
- Fixture.prototype.assertOutstandingSessions = function(count) {
- assert.eq(count, this.getDB("config").system.sessions.count());
- };
-
- Fixture.prototype.refresh = function() {
- assert.commandWorked(this.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
- };
-
- Fixture.prototype.reap = function() {
- assert.commandWorked(
- this.transactionConn.getDB("admin").runCommand({reapLogicalSessionCacheNow: 1}));
- };
-
- Fixture.prototype.getDB = function(db) {
- return this.conn.getDB(db);
- };
-
- Fixture.prototype.stop = function() {
- this.sessions.forEach(function(session) {
- session.endSession();
- });
- return this.impl.stop();
- };
-
- [Repl, Sharding].forEach(function(Impl) {
- {
- var fixture = new Fixture(new Impl(-1));
- // Remove a session
- fixture.getDB("config").system.sessions.remove({});
- fixture.assertOutstandingTransactions(nSessions);
- fixture.assertOutstandingSessions(0);
-
- // See the transaction get reaped as a result
- fixture.reap();
- fixture.assertOutstandingTransactions(0);
- fixture.assertOutstandingSessions(0);
-
- fixture.stop();
- }
-
- {
- var fixture = new Fixture(new Impl(30));
- // Remove a session
- fixture.getDB("config").system.sessions.remove({});
- fixture.assertOutstandingTransactions(nSessions);
- fixture.assertOutstandingSessions(0);
-
- // See the transaction was not reaped as a result
- fixture.reap();
- fixture.assertOutstandingTransactions(nSessions);
- fixture.assertOutstandingSessions(0);
-
- fixture.stop();
- }
+ // Ensure a write flushes a transaction
+ this.assertOutstandingTransactions(nSessions);
+ this.assertOutstandingSessions(nSessions);
+
+ // Ensure a refresh/reap doesn't remove the transaction
+ this.refresh();
+ this.reap();
+ this.assertOutstandingTransactions(nSessions);
+ this.assertOutstandingSessions(nSessions);
+}
+
+Fixture.prototype.assertOutstandingTransactions = function(count) {
+ assert.eq(count, this.transactionConn.getDB("config").transactions.count());
+};
+
+Fixture.prototype.assertOutstandingSessions = function(count) {
+ assert.eq(count, this.getDB("config").system.sessions.count());
+};
+
+Fixture.prototype.refresh = function() {
+ assert.commandWorked(this.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));
+};
+
+Fixture.prototype.reap = function() {
+ assert.commandWorked(
+ this.transactionConn.getDB("admin").runCommand({reapLogicalSessionCacheNow: 1}));
+};
+
+Fixture.prototype.getDB = function(db) {
+ return this.conn.getDB(db);
+};
+
+Fixture.prototype.stop = function() {
+ this.sessions.forEach(function(session) {
+ session.endSession();
});
+ return this.impl.stop();
+};
+
+[Repl, Sharding].forEach(function(Impl) {
+ {
+ var fixture = new Fixture(new Impl(-1));
+ // Remove a session
+ fixture.getDB("config").system.sessions.remove({});
+ fixture.assertOutstandingTransactions(nSessions);
+ fixture.assertOutstandingSessions(0);
+
+ // See the transaction get reaped as a result
+ fixture.reap();
+ fixture.assertOutstandingTransactions(0);
+ fixture.assertOutstandingSessions(0);
+
+ fixture.stop();
+ }
+
+ {
+ var fixture = new Fixture(new Impl(30));
+ // Remove a session
+ fixture.getDB("config").system.sessions.remove({});
+ fixture.assertOutstandingTransactions(nSessions);
+ fixture.assertOutstandingSessions(0);
+
+ // See the transaction was not reaped as a result
+ fixture.reap();
+ fixture.assertOutstandingTransactions(nSessions);
+ fixture.assertOutstandingSessions(0);
+
+ fixture.stop();
+ }
+});
})();
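A single-node sketch of the refresh/reap cycle the fixtures above drive, assuming the test-only refreshLogicalSessionCacheNow and reapLogicalSessionCacheNow commands are available (noPassthrough suites run with test commands enabled):

// With a minimum lifetime of -1, a transaction record becomes reapable as soon as
// its session entry disappears from config.system.sessions.
const rst = new ReplSetTest({
    nodes: 1,
    nodeOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: -1}},
});
rst.startSet();
rst.initiate();
const primary = rst.getPrimary();

// A retryable write creates an entry in config.transactions for the session.
const session = primary.startSession({retryWrites: 1});
assert.writeOK(session.getDatabase("test").test.save({a: 1}));
assert.commandWorked(primary.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1}));

// Remove the session record and reap; the orphaned transaction record goes with it.
primary.getDB("config").system.sessions.remove({});
assert.commandWorked(primary.getDB("admin").runCommand({reapLogicalSessionCacheNow: 1}));
assert.eq(0, primary.getDB("config").transactions.count());

session.endSession();
rst.stopSet();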
diff --git a/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js b/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
index 9dee2103f94..48de0c880c8 100644
--- a/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
+++ b/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js
@@ -9,59 +9,59 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/libs/check_log.js");
- const name = "transaction_write_with_snapshot_unavailable";
- const replTest = new ReplSetTest({name: name, nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const name = "transaction_write_with_snapshot_unavailable";
+const replTest = new ReplSetTest({name: name, nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const dbName = name;
- const dbNameB = dbName + "B";
- const collName = "collection";
- const collNameB = collName + "B";
+const dbName = name;
+const dbNameB = dbName + "B";
+const collName = "collection";
+const collNameB = collName + "B";
- const primary = replTest.getPrimary();
- const primaryDB = primary.getDB(dbName);
+const primary = replTest.getPrimary();
+const primaryDB = primary.getDB(dbName);
- assert.commandWorked(primaryDB[collName].insertOne({}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(primaryDB[collName].insertOne({}, {writeConcern: {w: "majority"}}));
- function testOp(cmd) {
- let op = Object.getOwnPropertyNames(cmd)[0];
- let session = primary.startSession();
- let sessionDB = session.getDatabase(name);
+function testOp(cmd) {
+ let op = Object.getOwnPropertyNames(cmd)[0];
+ let session = primary.startSession();
+ let sessionDB = session.getDatabase(name);
- jsTestLog(
- `Testing that SnapshotUnavailable during ${op} is labelled TransientTransactionError`);
+ jsTestLog(
+ `Testing that SnapshotUnavailable during ${op} is labelled TransientTransactionError`);
- session.startTransaction({readConcern: {level: "snapshot"}});
- assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{}]}));
- // Create collection outside transaction, cannot write to it in the transaction
- assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand({create: collNameB}));
+ session.startTransaction({readConcern: {level: "snapshot"}});
+ assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{}]}));
+ // Create the collection outside the transaction; writes to it inside the transaction
+ // will fail with SnapshotUnavailable because the snapshot predates the collection.
+ assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand({create: collNameB}));
- let res;
- try {
- res = sessionDB.getSiblingDB(dbNameB).runCommand(cmd);
- assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);
- assert.eq(res.ok, 0);
- assert(!res.hasOwnProperty("writeErrors"));
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- } catch (ex) {
- printjson(cmd);
- printjson(res);
- throw ex;
- }
-
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand(
- {dropDatabase: 1, writeConcern: {w: "majority"}}));
+ let res;
+ try {
+ res = sessionDB.getSiblingDB(dbNameB).runCommand(cmd);
+ assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);
+ assert.eq(res.ok, 0);
+ assert(!res.hasOwnProperty("writeErrors"));
+ assert.eq(res.errorLabels, ["TransientTransactionError"]);
+ } catch (ex) {
+ printjson(cmd);
+ printjson(res);
+ throw ex;
}
- testOp({insert: collNameB, documents: [{_id: 0}]});
- testOp({update: collNameB, updates: [{q: {}, u: {$set: {x: 1}}}]});
- testOp({delete: collNameB, deletes: [{q: {_id: 0}, limit: 1}]});
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand(
+ {dropDatabase: 1, writeConcern: {w: "majority"}}));
+}
+
+testOp({insert: collNameB, documents: [{_id: 0}]});
+testOp({update: collNameB, updates: [{q: {}, u: {$set: {x: 1}}}]});
+testOp({delete: collNameB, deletes: [{q: {_id: 0}, limit: 1}]});
- replTest.stopSet();
+replTest.stopSet();
})();
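A hedged sketch of how a caller might act on that label outside of this test: retry the whole transaction while the response carries TransientTransactionError. The helper name, database, and command are illustrative; only the label check mirrors what the test asserts.

function runInTxnWithTransientRetry(session, dbName, cmd) {
    // Retry the whole transaction while the server labels the failure as transient.
    let res;
    assert.soon(() => {
        session.startTransaction({readConcern: {level: "snapshot"}});
        res = session.getDatabase(dbName).runCommand(cmd);
        if (res.ok === 1) {
            assert.commandWorked(session.commitTransaction_forTesting());
            return true;
        }
        session.abortTransaction_forTesting();
        if (!res.errorLabels || !res.errorLabels.includes("TransientTransactionError")) {
            assert.commandWorked(res);  // Throws, surfacing the non-transient error.
        }
        return false;  // Transient; try the whole transaction again.
    }, "transaction did not complete despite retrying transient errors");
    return res;
}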
diff --git a/jstests/noPassthrough/transactions_work_with_in_memory_engine.js b/jstests/noPassthrough/transactions_work_with_in_memory_engine.js
index 7966656e390..fef2349265f 100644
--- a/jstests/noPassthrough/transactions_work_with_in_memory_engine.js
+++ b/jstests/noPassthrough/transactions_work_with_in_memory_engine.js
@@ -6,35 +6,37 @@
* engine (SERVER-36023).
*/
(function() {
- "use strict";
+"use strict";
- if (jsTest.options().storageEngine !== "inMemory") {
- jsTestLog("Skipping test because storageEngine is not inMemory");
- return;
- }
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTestLog("Skipping test because storageEngine is not inMemory");
+ return;
+}
- const dbName = "test";
- const collName = "transactions_work_with_in_memory_engine";
+const dbName = "test";
+const collName = "transactions_work_with_in_memory_engine";
- const replTest = new ReplSetTest({name: collName, nodes: 1});
- replTest.startSet({storageEngine: "inMemory"});
- replTest.initiate();
+const replTest = new ReplSetTest({name: collName, nodes: 1});
+replTest.startSet({storageEngine: "inMemory"});
+replTest.initiate();
- const primary = replTest.getPrimary();
+const primary = replTest.getPrimary();
- // Initiate a session.
- const sessionOptions = {causalConsistency: false};
- const session = primary.getDB(dbName).getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
+// Initiate a session.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = primary.getDB(dbName).getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
- // Create collection.
- assert.commandWorked(sessionDb[collName].insert({x: 0}));
+// Create collection.
+assert.commandWorked(sessionDb[collName].insert({x: 0}));
- // Execute a transaction that should succeed.
- session.startTransaction();
- assert.commandWorked(sessionDb[collName].insert({x: 1}));
- assert.commandWorked(session.commitTransaction_forTesting());
+// Execute a transaction that should succeed.
+session.startTransaction();
+assert.commandWorked(sessionDb[collName].insert({x: 1}));
+assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- replTest.stopSet();
+session.endSession();
+replTest.stopSet();
}());
diff --git a/jstests/noPassthrough/ttlMonitorSleepSecs_parameter.js b/jstests/noPassthrough/ttlMonitorSleepSecs_parameter.js
index 93eaa49500e..7d5c3ea00d2 100644
--- a/jstests/noPassthrough/ttlMonitorSleepSecs_parameter.js
+++ b/jstests/noPassthrough/ttlMonitorSleepSecs_parameter.js
@@ -1,18 +1,19 @@
// Tests the ttlMonitorSleepSecs parameter
(function() {
- 'use strict';
+'use strict';
- load('jstests/noPassthrough/libs/server_parameter_helpers.js');
+load('jstests/noPassthrough/libs/server_parameter_helpers.js');
- testNumericServerParameter('ttlMonitorSleepSecs',
- true, // is Startup Param
- false, // is runtime param
- 60, // default value
- 30, // valid, non-default value
- true, // has lower bound
- 0, // out of bound value (below lower bound)
- false, // has upper bound
- 'unused' // out of bounds value (above upper bound)
- );
+testNumericServerParameter(
+ 'ttlMonitorSleepSecs',
+ true, // is Startup Param
+ false, // is runtime param
+ 60, // default value
+ 30, // valid, non-default value
+ true, // has lower bound
+ 0, // out of bound value (below lower bound)
+ false, // has upper bound
+ 'unused' // out of bounds value (above upper bound)
+);
})();
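Concretely, and in contrast with transactionLifetimeLimitSeconds above, this parameter is startup-only; a brief hedged sketch of that distinction:

// ttlMonitorSleepSecs can be supplied when launching mongod...
const conn = MongoRunner.runMongod({setParameter: {ttlMonitorSleepSecs: 30}});
assert.neq(null, conn, "mongod should start with a valid ttlMonitorSleepSecs");

// ...but it is not a runtime parameter, so changing it through setParameter fails.
assert.commandFailed(conn.getDB("admin").runCommand({setParameter: 1, ttlMonitorSleepSecs: 10}));
MongoRunner.stopMongod(conn);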
diff --git a/jstests/noPassthrough/ttl_capped.js b/jstests/noPassthrough/ttl_capped.js
index d3d383cc984..c9eabbc0df7 100644
--- a/jstests/noPassthrough/ttl_capped.js
+++ b/jstests/noPassthrough/ttl_capped.js
@@ -4,83 +4,83 @@
* @tags: [requires_capped]
*/
(function() {
- "use strict";
+"use strict";
- var dbpath = MongoRunner.dataPath + "ttl_capped";
- resetDbpath(dbpath);
+var dbpath = MongoRunner.dataPath + "ttl_capped";
+resetDbpath(dbpath);
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- setParameter: "ttlMonitorSleepSecs=1",
- });
- assert.neq(null, conn, "mongod was unable to start up");
+var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ setParameter: "ttlMonitorSleepSecs=1",
+});
+assert.neq(null, conn, "mongod was unable to start up");
- var testDB = conn.getDB("test");
+var testDB = conn.getDB("test");
- assert.commandWorked(testDB.adminCommand({setParameter: 1, ttlMonitorEnabled: false}));
+assert.commandWorked(testDB.adminCommand({setParameter: 1, ttlMonitorEnabled: false}));
- var now = Date.now();
- var expireAfterSeconds = 10;
+var now = Date.now();
+var expireAfterSeconds = 10;
- var numCollectionsToCreate = 20;
- var width = numCollectionsToCreate.toString().length;
+var numCollectionsToCreate = 20;
+var width = numCollectionsToCreate.toString().length;
- // Create 'numCollectionsToCreate' collections with a TTL index, where every third collection is
- // capped. We create many collections with a TTL index to increase the odds that the TTL monitor
- // would process a non-capped collection after a capped collection. This allows us to verify
- // that the TTL monitor continues processing the remaining collections after encountering an
- // error processing a capped collection.
- for (var i = 0; i < numCollectionsToCreate; i++) {
- var collName = "ttl" + i.zeroPad(width);
- if (i % 3 === 1) {
- assert.commandWorked(testDB.createCollection(collName, {capped: true, size: 4096}));
- }
+// Create 'numCollectionsToCreate' collections with a TTL index, where every third collection is
+// capped. We create many collections with a TTL index to increase the odds that the TTL monitor
+// would process a non-capped collection after a capped collection. This allows us to verify
+// that the TTL monitor continues processing the remaining collections after encountering an
+// error processing a capped collection.
+for (var i = 0; i < numCollectionsToCreate; i++) {
+ var collName = "ttl" + i.zeroPad(width);
+ if (i % 3 === 1) {
+ assert.commandWorked(testDB.createCollection(collName, {capped: true, size: 4096}));
+ }
- // Create a TTL index on the 'date' field of the collection.
- var res = testDB[collName].ensureIndex({date: 1}, {expireAfterSeconds: expireAfterSeconds});
- assert.commandWorked(res);
+ // Create a TTL index on the 'date' field of the collection.
+ var res = testDB[collName].ensureIndex({date: 1}, {expireAfterSeconds: expireAfterSeconds});
+ assert.commandWorked(res);
- // Insert a single document with a 'date' field that is already expired according to the
- // index definition.
- assert.writeOK(testDB[collName].insert({date: new Date(now - expireAfterSeconds * 1000)}));
- }
+ // Insert a single document with a 'date' field that is already expired according to the
+ // index definition.
+ assert.writeOK(testDB[collName].insert({date: new Date(now - expireAfterSeconds * 1000)}));
+}
- // Increase the verbosity of the TTL monitor's output.
- assert.commandWorked(testDB.adminCommand({setParameter: 1, logComponentVerbosity: {index: 1}}));
+// Increase the verbosity of the TTL monitor's output.
+assert.commandWorked(testDB.adminCommand({setParameter: 1, logComponentVerbosity: {index: 1}}));
- // Enable the TTL monitor and wait for it to run.
- var ttlPasses = testDB.serverStatus().metrics.ttl.passes;
- assert.commandWorked(testDB.adminCommand({setParameter: 1, ttlMonitorEnabled: true}));
+// Enable the TTL monitor and wait for it to run.
+var ttlPasses = testDB.serverStatus().metrics.ttl.passes;
+assert.commandWorked(testDB.adminCommand({setParameter: 1, ttlMonitorEnabled: true}));
- var timeoutSeconds = 60;
- assert.soon(
- function checkIfTTLMonitorRan() {
- // The 'ttl.passes' metric is incremented when the TTL monitor starts processing the
- // indexes, so we wait for it to be incremented twice to know that the TTL monitor
- // finished processing the indexes at least once.
- return testDB.serverStatus().metrics.ttl.passes >= ttlPasses + 2;
- },
- function msg() {
- return "TTL monitor didn't run within " + timeoutSeconds + " seconds";
- },
- timeoutSeconds * 1000);
+var timeoutSeconds = 60;
+assert.soon(
+ function checkIfTTLMonitorRan() {
+ // The 'ttl.passes' metric is incremented when the TTL monitor starts processing the
+ // indexes, so we wait for it to be incremented twice to know that the TTL monitor
+ // finished processing the indexes at least once.
+ return testDB.serverStatus().metrics.ttl.passes >= ttlPasses + 2;
+ },
+ function msg() {
+ return "TTL monitor didn't run within " + timeoutSeconds + " seconds";
+ },
+ timeoutSeconds * 1000);
- for (var i = 0; i < numCollectionsToCreate; i++) {
- var coll = testDB["ttl" + i.zeroPad(width)];
- var count = coll.count();
- if (i % 3 === 1) {
- assert.eq(1,
- count,
- "the TTL monitor shouldn't have removed expired documents from" +
- " the capped collection '" + coll.getFullName() + "'");
- } else {
- assert.eq(0,
- count,
- "the TTL monitor didn't removed expired documents from the" +
- " collection '" + coll.getFullName() + "'");
- }
+for (var i = 0; i < numCollectionsToCreate; i++) {
+ var coll = testDB["ttl" + i.zeroPad(width)];
+ var count = coll.count();
+ if (i % 3 === 1) {
+ assert.eq(1,
+ count,
+ "the TTL monitor shouldn't have removed expired documents from" +
+ " the capped collection '" + coll.getFullName() + "'");
+ } else {
+ assert.eq(0,
+ count,
+ "the TTL monitor didn't removed expired documents from the" +
+ " collection '" + coll.getFullName() + "'");
}
+}
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
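The core building block of the test above, in isolation: create a TTL index, wait for the monitor to complete a pass, then check what it deleted. A hedged sketch with an illustrative collection name:

const conn = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"});
const testDB = conn.getDB("test");
const coll = testDB.ttl_sketch;

// Index the 'date' field with a 10 second TTL and insert an already-expired document.
assert.commandWorked(coll.ensureIndex({date: 1}, {expireAfterSeconds: 10}));
assert.writeOK(coll.insert({date: new Date(Date.now() - 11 * 1000)}));

// Wait for two full TTL passes so at least one pass saw the index after the insert.
const passes = testDB.serverStatus().metrics.ttl.passes;
assert.soon(() => testDB.serverStatus().metrics.ttl.passes >= passes + 2,
            "TTL monitor did not run");

assert.eq(0, coll.count(), "the expired document should have been removed");
MongoRunner.stopMongod(conn);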
diff --git a/jstests/noPassthrough/ttl_partial_index.js b/jstests/noPassthrough/ttl_partial_index.js
index d818f86d28d..af4c9c1a7fb 100644
--- a/jstests/noPassthrough/ttl_partial_index.js
+++ b/jstests/noPassthrough/ttl_partial_index.js
@@ -1,31 +1,31 @@
// Test that the TTL monitor will correctly use TTL indexes that are also partial indexes.
// SERVER-17984.
(function() {
- "use strict";
- // Launch mongod with shorter TTL monitor sleep interval.
- var runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"});
- var coll = runner.getDB("test").ttl_partial_index;
- coll.drop();
+"use strict";
+// Launch mongod with shorter TTL monitor sleep interval.
+var runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"});
+var coll = runner.getDB("test").ttl_partial_index;
+coll.drop();
- // Create TTL partial index.
- assert.commandWorked(coll.ensureIndex(
- {x: 1}, {expireAfterSeconds: 0, partialFilterExpression: {z: {$exists: true}}}));
+// Create TTL partial index.
+assert.commandWorked(coll.ensureIndex(
+ {x: 1}, {expireAfterSeconds: 0, partialFilterExpression: {z: {$exists: true}}}));
- var now = new Date();
- assert.writeOK(coll.insert({x: now, z: 2}));
- assert.writeOK(coll.insert({x: now}));
+var now = new Date();
+assert.writeOK(coll.insert({x: now, z: 2}));
+assert.writeOK(coll.insert({x: now}));
- // Wait for the TTL monitor to run at least twice (in case we weren't finished setting up our
- // collection when it ran the first time).
- var ttlPass = coll.getDB().serverStatus().metrics.ttl.passes;
- assert.soon(function() {
- return coll.getDB().serverStatus().metrics.ttl.passes >= ttlPass + 2;
- }, "TTL monitor didn't run before timing out.");
+// Wait for the TTL monitor to run at least twice (in case we weren't finished setting up our
+// collection when it ran the first time).
+var ttlPass = coll.getDB().serverStatus().metrics.ttl.passes;
+assert.soon(function() {
+ return coll.getDB().serverStatus().metrics.ttl.passes >= ttlPass + 2;
+}, "TTL monitor didn't run before timing out.");
- assert.eq(0,
- coll.find({z: {$exists: true}}).hint({x: 1}).itcount(),
- "Wrong number of documents in partial index, after TTL monitor run");
- assert.eq(
- 1, coll.find().itcount(), "Wrong number of documents in collection, after TTL monitor run");
- MongoRunner.stopMongod(runner);
+assert.eq(0,
+ coll.find({z: {$exists: true}}).hint({x: 1}).itcount(),
+ "Wrong number of documents in partial index, after TTL monitor run");
+assert.eq(
+ 1, coll.find().itcount(), "Wrong number of documents in collection, after TTL monitor run");
+MongoRunner.stopMongod(runner);
})();
diff --git a/jstests/noPassthrough/two_phase_index_build.js b/jstests/noPassthrough/two_phase_index_build.js
index d427571b2ec..084a360df11 100644
--- a/jstests/noPassthrough/two_phase_index_build.js
+++ b/jstests/noPassthrough/two_phase_index_build.js
@@ -6,71 +6,71 @@
(function() {
- // For 'assertIndexes'.
- load("jstests/noPassthrough/libs/index_build.js");
-
- const replSet = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+// For 'assertIndexes'.
+load("jstests/noPassthrough/libs/index_build.js");
+
+const replSet = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
+ },
+ ]
+});
- // Allow the createIndexes command to use the index builds coordinator in single-phase mode.
- replSet.startSet({setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: true}});
- replSet.initiate();
+// Allow the createIndexes command to use the index builds coordinator in single-phase mode.
+replSet.startSet({setParameter: {enableIndexBuildsCoordinatorForCreateIndexesCommand: true}});
+replSet.initiate();
- const testDB = replSet.getPrimary().getDB('test');
- const coll = testDB.twoPhaseIndexBuild;
- const collName = coll.getName();
- const secondaryColl = replSet.getSecondary().getDB('test')[collName];
+const testDB = replSet.getPrimary().getDB('test');
+const coll = testDB.twoPhaseIndexBuild;
+const collName = coll.getName();
+const secondaryColl = replSet.getSecondary().getDB('test')[collName];
- const bulk = coll.initializeUnorderedBulkOp();
- const numDocs = 1000;
- for (let i = 0; i < numDocs; i++) {
- bulk.insert({a: i, b: i});
- }
- assert.commandWorked(bulk.execute());
+const bulk = coll.initializeUnorderedBulkOp();
+const numDocs = 1000;
+for (let i = 0; i < numDocs; i++) {
+ bulk.insert({a: i, b: i});
+}
+assert.commandWorked(bulk.execute());
- // Use index builds coordinator for a two-phase build
- assert.commandWorked(testDB.runCommand(
- {twoPhaseCreateIndexes: coll.getName(), indexes: [{key: {a: 1}, name: 'a_1'}]}));
+// Use index builds coordinator for a two-phase build
+assert.commandWorked(testDB.runCommand(
+ {twoPhaseCreateIndexes: coll.getName(), indexes: [{key: {a: 1}, name: 'a_1'}]}));
- IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
- assert.eq(numDocs, coll.find({a: {$gte: 0}}).hint({a: 1}).itcount());
+IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]);
+assert.eq(numDocs, coll.find({a: {$gte: 0}}).hint({a: 1}).itcount());
- const cmdNs = testDB.getName() + ".$cmd";
- const localDB = testDB.getSiblingDB("local");
- const oplogColl = localDB.oplog.rs;
+const cmdNs = testDB.getName() + ".$cmd";
+const localDB = testDB.getSiblingDB("local");
+const oplogColl = localDB.oplog.rs;
- // Ensure both oplog entries were written to the oplog.
- assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.startIndexBuild": collName}).itcount());
- assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.commitIndexBuild": collName}).itcount());
+// Ensure both oplog entries were written to the oplog.
+assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.startIndexBuild": collName}).itcount());
+assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.commitIndexBuild": collName}).itcount());
- // Ensure the secondary builds the index.
- replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
- IndexBuildTest.assertIndexes(secondaryColl, 2, ["_id_", "a_1"]);
+// Ensure the secondary builds the index.
+replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
+IndexBuildTest.assertIndexes(secondaryColl, 2, ["_id_", "a_1"]);
- // Use index build coordinator for a single-phase index build through the createIndexes
- // command.
- assert.commandWorked(
- testDB.runCommand({createIndexes: coll.getName(), indexes: [{key: {b: 1}, name: 'b_1'}]}));
+// Use index build coordinator for a single-phase index build through the createIndexes
+// command.
+assert.commandWorked(
+ testDB.runCommand({createIndexes: coll.getName(), indexes: [{key: {b: 1}, name: 'b_1'}]}));
- IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1", "b_1"]);
- assert.eq(numDocs, coll.find({a: {$gte: 0}}).hint({b: 1}).itcount());
+IndexBuildTest.assertIndexes(coll, 3, ["_id_", "a_1", "b_1"]);
+assert.eq(numDocs, coll.find({a: {$gte: 0}}).hint({b: 1}).itcount());
- // Ensure only one oplog entry was written to the oplog.
- assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.createIndexes": collName}).itcount());
+// Ensure only one oplog entry was written to the oplog.
+assert.eq(1, oplogColl.find({op: "c", ns: cmdNs, "o.createIndexes": collName}).itcount());
- // Ensure the secondary builds the index.
- replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
- IndexBuildTest.assertIndexes(secondaryColl, 3, ["_id_", "a_1", "b_1"]);
+// Ensure the secondary builds the index.
+replSet.waitForAllIndexBuildsToFinish(testDB.getName(), collName);
+IndexBuildTest.assertIndexes(secondaryColl, 3, ["_id_", "a_1", "b_1"]);
- replSet.stopSet();
+replSet.stopSet();
})();
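For reference, the oplog check above boiled down to its essentials, assuming a replica-set primary connection named primary and the same test.twoPhaseIndexBuild collection:

// A two-phase build should leave exactly one startIndexBuild and one commitIndexBuild
// command entry in the oplog, both against the database's $cmd namespace.
const oplog = primary.getDB("local").oplog.rs;
const cmdNs = "test.$cmd";
assert.eq(1, oplog.find({op: "c", ns: cmdNs, "o.startIndexBuild": "twoPhaseIndexBuild"}).itcount());
assert.eq(1, oplog.find({op: "c", ns: cmdNs, "o.commitIndexBuild": "twoPhaseIndexBuild"}).itcount());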
diff --git a/jstests/noPassthrough/two_phase_index_build_ops_disabled_through_applyops.js b/jstests/noPassthrough/two_phase_index_build_ops_disabled_through_applyops.js
index 72ec4cdb7cb..5d19e39f40e 100644
--- a/jstests/noPassthrough/two_phase_index_build_ops_disabled_through_applyops.js
+++ b/jstests/noPassthrough/two_phase_index_build_ops_disabled_through_applyops.js
@@ -7,50 +7,43 @@
(function() {
- const replSet = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const replSet = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ]
- });
-
- replSet.startSet();
- replSet.initiate();
-
- const testDB = replSet.getPrimary().getDB('test');
- const coll = testDB.twoPhaseIndexBuild;
- const cmdNs = testDB.getName() + ".$cmd";
-
- coll.insert({a: 1});
-
- assert.commandFailedWithCode(testDB.adminCommand({
- applyOps: [
- {op: "c", ns: cmdNs, o: {startIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}}
- ]
- }),
- [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
-
- assert.commandFailedWithCode(testDB.adminCommand({
- applyOps: [{
- op: "c",
- ns: cmdNs,
- o: {commitIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}
- }]
- }),
- [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
-
- assert.commandFailedWithCode(testDB.adminCommand({
- applyOps: [
- {op: "c", ns: cmdNs, o: {abortIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}}
- ]
- }),
- [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
-
- replSet.stopSet();
+ },
+ ]
+});
+
+replSet.startSet();
+replSet.initiate();
+
+const testDB = replSet.getPrimary().getDB('test');
+const coll = testDB.twoPhaseIndexBuild;
+const cmdNs = testDB.getName() + ".$cmd";
+
+coll.insert({a: 1});
+
+assert.commandFailedWithCode(testDB.adminCommand({
+ applyOps: [{op: "c", ns: cmdNs, o: {startIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}}]
+}),
+ [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
+
+assert.commandFailedWithCode(testDB.adminCommand({
+ applyOps:
+ [{op: "c", ns: cmdNs, o: {commitIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}}]
+}),
+ [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
+
+assert.commandFailedWithCode(testDB.adminCommand({
+ applyOps: [{op: "c", ns: cmdNs, o: {abortIndexBuild: coll.getName(), key: {a: 1}, name: 'a_1'}}]
+}),
+ [ErrorCodes.CommandNotSupported, ErrorCodes.FailedToParse]);
+
+replSet.stopSet();
})();
diff --git a/jstests/noPassthrough/txn_override_causal_consistency.js b/jstests/noPassthrough/txn_override_causal_consistency.js
index 8ec6c0d276c..ac7c9758c96 100644
--- a/jstests/noPassthrough/txn_override_causal_consistency.js
+++ b/jstests/noPassthrough/txn_override_causal_consistency.js
@@ -5,206 +5,206 @@
* @tags: [requires_replication, uses_transactions]
*/
(function() {
- "use strict";
-
- const dbName = "test";
- const collName = "foo";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const conn = new Mongo(rst.getPrimary().host);
-
- // Create the collection so the override doesn't try to when it is not expected.
- assert.commandWorked(conn.getDB(dbName).createCollection(collName));
-
- // Override runCommand to add each command it sees to a global array that can be inspected by
- // this test and to allow mocking certain responses.
- let cmdObjsSeen = [];
- let mockNetworkError, mockFirstResponse, mockFirstCommitResponse;
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjsSeen.push(cmdObj);
-
- if (mockNetworkError) {
- mockNetworkError = undefined;
- throw new Error("network error");
- }
-
- if (mockFirstResponse) {
- const mockedRes = mockFirstResponse;
- mockFirstResponse = undefined;
- return mockedRes;
- }
-
- const cmdName = Object.keys(cmdObj)[0];
- if (cmdName === "commitTransaction" && mockFirstCommitResponse) {
- const mockedRes = mockFirstCommitResponse;
- mockFirstCommitResponse = undefined;
- return mockedRes;
- }
-
- return mongoRunCommandOriginal.apply(this, arguments);
- };
-
- // Runs the given function with a collection from a session made with the sessionOptions on
- // TestData and asserts the seen commands that would start a transaction have or do not have
- // afterClusterTime.
- function inspectFirstCommandForAfterClusterTime(conn, cmdName, isCausal, expectRetry, func) {
- const session = conn.startSession(TestData.sessionOptions);
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
-
- cmdObjsSeen = [];
- func(sessionColl);
-
- // Find all requests sent with the expected command name, in case the scenario allows
- // retrying more than once or expects to end with a commit.
- let cmds = [];
- if (!expectRetry) {
- assert.eq(1, cmdObjsSeen.length);
- cmds.push(cmdObjsSeen[0]);
- } else {
- assert.lt(1, cmdObjsSeen.length);
- cmds = cmdObjsSeen.filter(obj => Object.keys(obj)[0] === cmdName);
- }
-
- for (let cmd of cmds) {
- if (isCausal) {
- assert(cmd.hasOwnProperty("$clusterTime"),
- "Expected " + tojson(cmd) + " to have a $clusterTime.");
- assert(cmd.hasOwnProperty("readConcern"),
- "Expected " + tojson(cmd) + " to have a read concern.");
- assert(cmd.readConcern.hasOwnProperty("afterClusterTime"),
- "Expected " + tojson(cmd) + " to have an afterClusterTime.");
- } else {
- if (TestData.hasOwnProperty("enableMajorityReadConcern") &&
- TestData.enableMajorityReadConcern === false) {
- // Commands not allowed in a transaction without causal consistency will not
- // have a read concern on variants that don't enable majority read concern.
- continue;
- }
-
- assert(cmd.hasOwnProperty("readConcern"),
- "Expected " + tojson(cmd) + " to have a read concern.");
- assert(!cmd.readConcern.hasOwnProperty("afterClusterTime"),
- "Expected " + tojson(cmd) + " to not have an afterClusterTime.");
- }
- }
-
- // Run a command not runnable in a transaction to reset the override's transaction state.
- assert.commandWorked(sessionDB.runCommand({ping: 1}));
-
- session.endSession();
+"use strict";
+
+const dbName = "test";
+const collName = "foo";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const conn = new Mongo(rst.getPrimary().host);
+
+// Create the collection up front so the override doesn't have to create it implicitly
+// when that is not expected.
+assert.commandWorked(conn.getDB(dbName).createCollection(collName));
+
+// Override runCommand to add each command it sees to a global array that can be inspected by
+// this test and to allow mocking certain responses.
+let cmdObjsSeen = [];
+let mockNetworkError, mockFirstResponse, mockFirstCommitResponse;
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjsSeen.push(cmdObj);
+
+ if (mockNetworkError) {
+ mockNetworkError = undefined;
+ throw new Error("network error");
}
- // Helper methods for testing specific commands.
-
- function testInsert(conn, isCausal, expectRetry) {
- inspectFirstCommandForAfterClusterTime(conn, "insert", isCausal, expectRetry, (coll) => {
- assert.writeOK(coll.insert({x: 1}));
- });
+ if (mockFirstResponse) {
+ const mockedRes = mockFirstResponse;
+ mockFirstResponse = undefined;
+ return mockedRes;
}
- function testFind(conn, isCausal, expectRetry) {
- inspectFirstCommandForAfterClusterTime(conn, "find", isCausal, expectRetry, (coll) => {
- assert.eq(0, coll.find({y: 1}).itcount());
- });
+ const cmdName = Object.keys(cmdObj)[0];
+ if (cmdName === "commitTransaction" && mockFirstCommitResponse) {
+ const mockedRes = mockFirstCommitResponse;
+ mockFirstCommitResponse = undefined;
+ return mockedRes;
}
- function testCount(conn, isCausal, expectRetry) {
- inspectFirstCommandForAfterClusterTime(conn, "count", isCausal, expectRetry, (coll) => {
- assert.eq(0, coll.count({y: 1}));
- });
+ return mongoRunCommandOriginal.apply(this, arguments);
+};
+
+// Runs the given function with a collection from a session made with the sessionOptions on
+// TestData and asserts the seen commands that would start a transaction have or do not have
+// afterClusterTime.
+function inspectFirstCommandForAfterClusterTime(conn, cmdName, isCausal, expectRetry, func) {
+ const session = conn.startSession(TestData.sessionOptions);
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB[collName];
+
+ cmdObjsSeen = [];
+ func(sessionColl);
+
+ // Find all requests sent with the expected command name, in case the scenario allows
+ // retrying more than once or expects to end with a commit.
+ let cmds = [];
+ if (!expectRetry) {
+ assert.eq(1, cmdObjsSeen.length);
+ cmds.push(cmdObjsSeen[0]);
+ } else {
+ assert.lt(1, cmdObjsSeen.length);
+ cmds = cmdObjsSeen.filter(obj => Object.keys(obj)[0] === cmdName);
}
- function testCommit(conn, isCausal, expectRetry) {
- inspectFirstCommandForAfterClusterTime(conn, "find", isCausal, expectRetry, (coll) => {
- assert.eq(0, coll.find({y: 1}).itcount());
- assert.commandWorked(coll.getDB().runCommand({ping: 1})); // commits the transaction.
- });
- }
+ for (let cmd of cmds) {
+ if (isCausal) {
+ assert(cmd.hasOwnProperty("$clusterTime"),
+ "Expected " + tojson(cmd) + " to have a $clusterTime.");
+ assert(cmd.hasOwnProperty("readConcern"),
+ "Expected " + tojson(cmd) + " to have a read concern.");
+ assert(cmd.readConcern.hasOwnProperty("afterClusterTime"),
+ "Expected " + tojson(cmd) + " to have an afterClusterTime.");
+ } else {
+ if (TestData.hasOwnProperty("enableMajorityReadConcern") &&
+ TestData.enableMajorityReadConcern === false) {
+ // Commands not allowed in a transaction without causal consistency will not
+ // have a read concern on variants that don't enable majority read concern.
+ continue;
+ }
- // Load the txn_override after creating the spy, so the spy will see commands after being
- // transformed by the override. Also configure network error retries because several suites use
- // both.
- TestData.networkErrorAndTxnOverrideConfig = {
- wrapCRUDinTransactions: true,
- retryOnNetworkErrors: true
- };
- load("jstests/libs/override_methods/network_error_and_txn_override.js");
-
- TestData.logRetryAttempts = true;
-
- // Run a command to guarantee operation time is initialized on the database's session.
- assert.commandWorked(conn.getDB(dbName).runCommand({ping: 1}));
-
- function runTest() {
- for (let isCausal of[false, true]) {
- jsTestLog("Testing with isCausal = " + isCausal);
- TestData.sessionOptions = {causalConsistency: isCausal};
-
- // Commands that accept read and write concern allowed in a transaction.
- testInsert(conn, isCausal, false /*expectRetry*/);
- testFind(conn, isCausal, false /*expectRetry*/);
-
- // Command that can accept read concern not allowed in a transaction.
- testCount(conn, isCausal, false /*expectRetry*/);
-
- // Command that attempts to implicitly create a collection.
- conn.getDB(dbName)[collName].drop();
- testInsert(conn, isCausal, true /*expectRetry*/);
-
- // Command that can accept read concern with retryable error.
- mockFirstResponse = {ok: 0, code: ErrorCodes.CursorKilled};
- testFind(conn, isCausal, true /*expectRetry*/);
-
- // Commands that can accept read and write concern with network error.
- mockNetworkError = true;
- testInsert(conn, isCausal, true /*expectRetry*/);
-
- mockNetworkError = true;
- testFind(conn, isCausal, true /*expectRetry*/);
-
- // Command that can accept read concern not allowed in a transaction with network error.
- mockNetworkError = true;
- testCount(conn, isCausal, true /*expectRetry*/);
-
- // Commands that can accept read and write concern with transient transaction error.
- mockFirstResponse = {
- ok: 0,
- code: ErrorCodes.NoSuchTransaction,
- errorLabels: ["TransientTransactionError"]
- };
- testFind(conn, isCausal, true /*expectRetry*/);
-
- mockFirstResponse = {
- ok: 0,
- code: ErrorCodes.NoSuchTransaction,
- errorLabels: ["TransientTransactionError"]
- };
- testInsert(conn, isCausal, true /*expectRetry*/);
-
- // Transient transaction error on commit attempt.
- mockFirstCommitResponse = {
- ok: 0,
- code: ErrorCodes.NoSuchTransaction,
- errorLabels: ["TransientTransactionError"]
- };
- testCommit(conn, isCausal, true /*expectRetry*/);
-
- // Network error on commit attempt.
- mockFirstCommitResponse = {ok: 0, code: ErrorCodes.NotMaster};
- testCommit(conn, isCausal, true /*expectRetry*/);
+ assert(cmd.hasOwnProperty("readConcern"),
+ "Expected " + tojson(cmd) + " to have a read concern.");
+ assert(!cmd.readConcern.hasOwnProperty("afterClusterTime"),
+ "Expected " + tojson(cmd) + " to not have an afterClusterTime.");
}
}
- runTest();
+ // Run a command not runnable in a transaction to reset the override's transaction state.
+ assert.commandWorked(sessionDB.runCommand({ping: 1}));
+
+ session.endSession();
+}
+
+// Helper methods for testing specific commands.
+
+function testInsert(conn, isCausal, expectRetry) {
+ inspectFirstCommandForAfterClusterTime(conn, "insert", isCausal, expectRetry, (coll) => {
+ assert.writeOK(coll.insert({x: 1}));
+ });
+}
+
+function testFind(conn, isCausal, expectRetry) {
+ inspectFirstCommandForAfterClusterTime(conn, "find", isCausal, expectRetry, (coll) => {
+ assert.eq(0, coll.find({y: 1}).itcount());
+ });
+}
+
+function testCount(conn, isCausal, expectRetry) {
+ inspectFirstCommandForAfterClusterTime(conn, "count", isCausal, expectRetry, (coll) => {
+ assert.eq(0, coll.count({y: 1}));
+ });
+}
+
+function testCommit(conn, isCausal, expectRetry) {
+ inspectFirstCommandForAfterClusterTime(conn, "find", isCausal, expectRetry, (coll) => {
+ assert.eq(0, coll.find({y: 1}).itcount());
+ assert.commandWorked(coll.getDB().runCommand({ping: 1})); // commits the transaction.
+ });
+}
+
+// Load the txn_override after creating the spy, so the spy will see commands after being
+// transformed by the override. Also configure network error retries because several suites use
+// both.
+TestData.networkErrorAndTxnOverrideConfig = {
+ wrapCRUDinTransactions: true,
+ retryOnNetworkErrors: true
+};
+load("jstests/libs/override_methods/network_error_and_txn_override.js");
+
+TestData.logRetryAttempts = true;
+
+// Run a command to guarantee operation time is initialized on the database's session.
+assert.commandWorked(conn.getDB(dbName).runCommand({ping: 1}));
+
+function runTest() {
+ for (let isCausal of [false, true]) {
+ jsTestLog("Testing with isCausal = " + isCausal);
+ TestData.sessionOptions = {causalConsistency: isCausal};
+
+ // Commands that accept read and write concern allowed in a transaction.
+ testInsert(conn, isCausal, false /*expectRetry*/);
+ testFind(conn, isCausal, false /*expectRetry*/);
+
+ // Command that can accept read concern not allowed in a transaction.
+ testCount(conn, isCausal, false /*expectRetry*/);
+
+ // Command that attempts to implicitly create a collection.
+ conn.getDB(dbName)[collName].drop();
+ testInsert(conn, isCausal, true /*expectRetry*/);
+
+ // Command that can accept read concern with retryable error.
+ mockFirstResponse = {ok: 0, code: ErrorCodes.CursorKilled};
+ testFind(conn, isCausal, true /*expectRetry*/);
+
+ // Commands that can accept read and write concern with network error.
+ mockNetworkError = true;
+ testInsert(conn, isCausal, true /*expectRetry*/);
+
+ mockNetworkError = true;
+ testFind(conn, isCausal, true /*expectRetry*/);
+
+ // Command that can accept read concern not allowed in a transaction with network error.
+ mockNetworkError = true;
+ testCount(conn, isCausal, true /*expectRetry*/);
+
+ // Commands that can accept read and write concern with transient transaction error.
+ mockFirstResponse = {
+ ok: 0,
+ code: ErrorCodes.NoSuchTransaction,
+ errorLabels: ["TransientTransactionError"]
+ };
+ testFind(conn, isCausal, true /*expectRetry*/);
+
+ mockFirstResponse = {
+ ok: 0,
+ code: ErrorCodes.NoSuchTransaction,
+ errorLabels: ["TransientTransactionError"]
+ };
+ testInsert(conn, isCausal, true /*expectRetry*/);
+
+ // Transient transaction error on commit attempt.
+ mockFirstCommitResponse = {
+ ok: 0,
+ code: ErrorCodes.NoSuchTransaction,
+ errorLabels: ["TransientTransactionError"]
+ };
+ testCommit(conn, isCausal, true /*expectRetry*/);
+
+ // Network error on commit attempt.
+ mockFirstCommitResponse = {ok: 0, code: ErrorCodes.NotMaster};
+ testCommit(conn, isCausal, true /*expectRetry*/);
+ }
+}
+
+runTest();
- // With read concern majority disabled.
- TestData.enableMajorityReadConcern = false;
- runTest();
- delete TestData.enableMajorityReadConcern;
+// With read concern majority disabled.
+TestData.enableMajorityReadConcern = false;
+runTest();
+delete TestData.enableMajorityReadConcern;
- rst.stopSet();
+rst.stopSet();
})();
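The spy used above is a reusable pattern on its own; a minimal hedged sketch, assuming an existing shell connection conn:

// Capture every command object a connection sends while delegating to the real method.
const cmdObjsSeen = [];
const originalRunCommand = Mongo.prototype.runCommand;
Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
    cmdObjsSeen.push(cmdObj);
    return originalRunCommand.apply(this, arguments);
};

// Drive some traffic through the connection, then inspect what actually went out.
conn.getDB("test").foo.find().itcount();
jsTestLog("Commands seen: " + tojson(cmdObjsSeen.map(obj => Object.keys(obj)[0])));

// Restore the original method so later code is unaffected.
Mongo.prototype.runCommand = originalRunCommand;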
diff --git a/jstests/noPassthrough/umask.js b/jstests/noPassthrough/umask.js
index d8869f78f56..8d7234b15d3 100644
--- a/jstests/noPassthrough/umask.js
+++ b/jstests/noPassthrough/umask.js
@@ -5,74 +5,76 @@
* @tags: [ requires_wiredtiger ]
*/
(function() {
- 'use strict';
- // We only test this on POSIX since that's the only platform where umasks make sense
- if (_isWindows()) {
- return;
- }
+'use strict';
+// We only test this on POSIX since that's the only platform where umasks make sense
+if (_isWindows()) {
+ return;
+}
- const oldUmask = new Number(umask(0));
- jsTestLog("Setting umask to really permissive 000 mode, old mode was " + oldUmask.toString(8));
+const oldUmask = new Number(umask(0));
+jsTestLog("Setting umask to really permissive 000 mode, old mode was " + oldUmask.toString(8));
- const defaultUmask = Number.parseInt("600", 8);
- const permissiveUmask = Number.parseInt("666", 8);
+const defaultUmask = Number.parseInt("600", 8);
+const permissiveUmask = Number.parseInt("666", 8);
- // Any files that have some explicit permissions set on them should be added to this list
- const exceptions = [
- // The lock file gets created with explicit 644 permissions
- 'mongod.lock',
- // Mobile se files get created with 644 permissions when honoring the system umask
- 'mobile.sqlite',
- 'mobile.sqlite-shm',
- 'mobile.sqlite-wal',
- ];
+// Any files that have some explicit permissions set on them should be added to this list
+const exceptions = [
+ // The lock file gets created with explicit 644 permissions
+ 'mongod.lock',
+ // Mobile SE files get created with 644 permissions when honoring the system umask
+ 'mobile.sqlite',
+ 'mobile.sqlite-shm',
+ 'mobile.sqlite-wal',
+];
- let mongodOptions = MongoRunner.mongodOptions({
- useLogFiles: true,
- cleanData: true,
- });
+let mongodOptions = MongoRunner.mongodOptions({
+ useLogFiles: true,
+ cleanData: true,
+});
- if (buildInfo()["modules"].some((mod) => {
- return mod == "enterprise";
- })) {
- mongodOptions.auditDestination = "file";
- mongodOptions.auditPath = mongodOptions.dbpath + "/audit.log";
- mongodOptions.auditFormat = "JSON";
- }
+if (buildInfo()["modules"].some((mod) => {
+ return mod == "enterprise";
+ })) {
+ mongodOptions.auditDestination = "file";
+ mongodOptions.auditPath = mongodOptions.dbpath + "/audit.log";
+ mongodOptions.auditFormat = "JSON";
+}
- const checkMask = (topDir, expected, honoringUmask) => {
- const maybeNot = honoringUmask ? "" : " not";
- const processDirectory = (dir) => {
- jsTestLog(`Checking ${dir}`);
- ls(dir).forEach((file) => {
- if (file.endsWith("/")) {
- return processDirectory(file);
- } else if (exceptions.some((exception) => {
- return file.endsWith(exception);
- })) {
- return;
- }
- const mode = new Number(getFileMode(file));
- const modeStr = mode.toString(8);
- const msg = `Mode for ${file} is ${modeStr} when${maybeNot} honoring system umask`;
- assert.eq(mode.valueOf(), expected, msg);
- });
- };
-
- processDirectory(topDir);
+const checkMask = (topDir, expected, honoringUmask) => {
+ const maybeNot = honoringUmask ? "" : " not";
+ const processDirectory = (dir) => {
+ jsTestLog(`Checking ${dir}`);
+ ls(dir).forEach((file) => {
+ if (file.endsWith("/")) {
+ return processDirectory(file);
+ } else if (exceptions.some((exception) => {
+ return file.endsWith(exception);
+ })) {
+ return;
+ }
+ const mode = new Number(getFileMode(file));
+ const modeStr = mode.toString(8);
+ const msg = `Mode for ${file} is ${modeStr} when${maybeNot} honoring system umask`;
+ assert.eq(mode.valueOf(), expected, msg);
+ });
};
- // First we start up the mongod normally, all the files except mongod.lock should have the mode
- // 0600
- let conn = MongoRunner.runMongod(mongodOptions);
- MongoRunner.stopMongod(conn);
- checkMask(conn.fullOptions.dbpath, defaultUmask, false);
+ processDirectory(topDir);
+};
+
+// First we start up the mongod normally; all the files except mongod.lock should have the
+// mode 0600.
+let conn = MongoRunner.runMongod(mongodOptions);
+MongoRunner.stopMongod(conn);
+checkMask(conn.fullOptions.dbpath, defaultUmask, false);
- // Restart the mongod with honorSystemUmask, all files should have the mode 0666
- mongodOptions.setParameter = {honorSystemUmask: true};
- conn = MongoRunner.runMongod(mongodOptions);
- MongoRunner.stopMongod(conn);
- checkMask(conn.fullOptions.dbpath, permissiveUmask, false);
+// Restart the mongod with honorSystemUmask, all files should have the mode 0666
+mongodOptions.setParameter = {
+ honorSystemUmask: true
+};
+conn = MongoRunner.runMongod(mongodOptions);
+MongoRunner.stopMongod(conn);
+checkMask(conn.fullOptions.dbpath, permissiveUmask, false);
- umask(oldUmask.valueOf());
+umask(oldUmask.valueOf());
})();
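The permission walk above relies on two shell helpers, ls() and getFileMode(); a hedged sketch of that building block, assuming conn is a mongod previously started (and stopped) by MongoRunner:

// Recursively print the octal mode of every file under a dbpath.
const printModes = (dir) => {
    ls(dir).forEach((file) => {
        if (file.endsWith("/")) {
            return printModes(file);
        }
        const mode = new Number(getFileMode(file));
        jsTestLog(`${file} has mode ${mode.toString(8)}`);
    });
};
printModes(conn.fullOptions.dbpath);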
diff --git a/jstests/noPassthrough/unix_socket.js b/jstests/noPassthrough/unix_socket.js
index ff1a18afadf..fc1ad2abf58 100644
--- a/jstests/noPassthrough/unix_socket.js
+++ b/jstests/noPassthrough/unix_socket.js
@@ -10,112 +10,112 @@
*/
//@tags: [requires_sharding]
(function() {
- 'use strict';
- // This test will only work on POSIX machines.
- if (_isWindows()) {
- return;
- }
-
- // Do not fail if this test leaves unterminated processes because testSockOptions
- // is expected to throw before it calls stopMongod.
- TestData.failIfUnterminatedProcesses = false;
-
- var doesLogMatchRegex = function(logArray, regex) {
- for (let i = (logArray.length - 1); i >= 0; i--) {
- var regexInLine = regex.exec(logArray[i]);
- if (regexInLine != null) {
- return true;
- }
- }
- return false;
- };
-
- var checkSocket = function(path) {
- assert.eq(fileExists(path), true);
- var conn = new Mongo(path);
- assert.commandWorked(conn.getDB("admin").runCommand("ping"),
- `Expected ping command to succeed for ${path}`);
- };
-
- var testSockOptions = function(bindPath, expectSockPath, optDict, bindSep = ',', optMongos) {
- var optDict = optDict || {};
- if (bindPath) {
- optDict["bind_ip"] = `${MongoRunner.dataDir}/${bindPath}${bindSep}127.0.0.1`;
+'use strict';
+// This test will only work on POSIX machines.
+if (_isWindows()) {
+ return;
+}
+
+// Do not fail if this test leaves unterminated processes because testSockOptions
+// is expected to throw before it calls stopMongod.
+TestData.failIfUnterminatedProcesses = false;
+
+var doesLogMatchRegex = function(logArray, regex) {
+ for (let i = (logArray.length - 1); i >= 0; i--) {
+ var regexInLine = regex.exec(logArray[i]);
+ if (regexInLine != null) {
+ return true;
}
+ }
+ return false;
+};
+
+var checkSocket = function(path) {
+ assert.eq(fileExists(path), true);
+ var conn = new Mongo(path);
+ assert.commandWorked(conn.getDB("admin").runCommand("ping"),
+ `Expected ping command to succeed for ${path}`);
+};
+
+var testSockOptions = function(bindPath, expectSockPath, optDict, bindSep = ',', optMongos) {
+ var optDict = optDict || {};
+ if (bindPath) {
+ optDict["bind_ip"] = `${MongoRunner.dataDir}/${bindPath}${bindSep}127.0.0.1`;
+ }
- var conn, shards;
- if (optMongos) {
- shards = new ShardingTest({shards: 1, mongos: 1, other: {mongosOptions: optDict}});
- assert.neq(shards, null, "Expected cluster to start okay");
- conn = shards.s0;
- } else {
- conn = MongoRunner.runMongod(optDict);
- }
+ var conn, shards;
+ if (optMongos) {
+ shards = new ShardingTest({shards: 1, mongos: 1, other: {mongosOptions: optDict}});
+ assert.neq(shards, null, "Expected cluster to start okay");
+ conn = shards.s0;
+ } else {
+ conn = MongoRunner.runMongod(optDict);
+ }
- assert.neq(conn, null, `Expected ${optMongos ? "mongos" : "mongod"} to start okay`);
+ assert.neq(conn, null, `Expected ${optMongos ? "mongos" : "mongod"} to start okay`);
- const defaultUNIXSocket = `/tmp/mongodb-${conn.port}.sock`;
- var checkPath = defaultUNIXSocket;
- if (expectSockPath) {
- checkPath = `${MongoRunner.dataDir}/${expectSockPath}`;
- }
+ const defaultUNIXSocket = `/tmp/mongodb-${conn.port}.sock`;
+ var checkPath = defaultUNIXSocket;
+ if (expectSockPath) {
+ checkPath = `${MongoRunner.dataDir}/${expectSockPath}`;
+ }
- checkSocket(checkPath);
+ checkSocket(checkPath);
- // Test the naming of the unix socket
- var log = conn.adminCommand({getLog: 'global'});
- assert.commandWorked(log, "Expected getting the log to work");
- var ll = log.log;
- var re = new RegExp("anonymous unix socket");
- assert(doesLogMatchRegex(ll, re), "Log message did not contain 'anonymous unix socket'");
+ // Test the naming of the unix socket
+ var log = conn.adminCommand({getLog: 'global'});
+ assert.commandWorked(log, "Expected getting the log to work");
+ var ll = log.log;
+ var re = new RegExp("anonymous unix socket");
+ assert(doesLogMatchRegex(ll, re), "Log message did not contain 'anonymous unix socket'");
- if (optMongos) {
- shards.stop();
- } else {
- MongoRunner.stopMongod(conn);
- }
+ if (optMongos) {
+ shards.stop();
+ } else {
+ MongoRunner.stopMongod(conn);
+ }
- assert.eq(fileExists(checkPath), false);
- };
-
- // Check that the default unix sockets work
- testSockOptions();
- testSockOptions(undefined, undefined, undefined, ',', true);
-
- // Check that a custom unix socket path works
- testSockOptions("testsock.socket", "testsock.socket");
- testSockOptions("testsock.socket", "testsock.socket", undefined, ',', true);
-
- // Check that a custom unix socket path works with spaces
- testSockOptions("test sock.socket", "test sock.socket");
- testSockOptions("test sock.socket", "test sock.socket", undefined, ',', true);
-
- // Check that a custom unix socket path works with spaces before the comma and after
- testSockOptions("testsock.socket ", "testsock.socket", undefined, ', ');
- testSockOptions("testsock.socket ", "testsock.socket", undefined, ', ', true);
-
- // Check that a bad UNIX path breaks
- assert.throws(function() {
- var badname = "a".repeat(200) + ".socket";
- testSockOptions(badname, badname);
- });
-
- // Check that if UNIX sockets are disabled that we aren't able to connect over UNIX sockets
- assert.throws(function() {
- testSockOptions(undefined, undefined, {nounixsocket: ""});
- });
-
- // Check the unixSocketPrefix option
- var socketPrefix = `${MongoRunner.dataDir}/socketdir`;
- mkdir(socketPrefix);
- var port = allocatePort();
- testSockOptions(
- undefined, `socketdir/mongodb-${port}.sock`, {unixSocketPrefix: socketPrefix, port: port});
-
- port = allocatePort();
- testSockOptions(undefined,
- `socketdir/mongodb-${port}.sock`,
- {unixSocketPrefix: socketPrefix, port: port},
- ',',
- true);
+ assert.eq(fileExists(checkPath), false);
+};
+
+// Check that the default unix sockets work
+testSockOptions();
+testSockOptions(undefined, undefined, undefined, ',', true);
+
+// Check that a custom unix socket path works
+testSockOptions("testsock.socket", "testsock.socket");
+testSockOptions("testsock.socket", "testsock.socket", undefined, ',', true);
+
+// Check that a custom unix socket path works with spaces
+testSockOptions("test sock.socket", "test sock.socket");
+testSockOptions("test sock.socket", "test sock.socket", undefined, ',', true);
+
+// Check that a custom unix socket path works with spaces before the comma and after
+testSockOptions("testsock.socket ", "testsock.socket", undefined, ', ');
+testSockOptions("testsock.socket ", "testsock.socket", undefined, ', ', true);
+
+// Check that a bad UNIX path breaks
+assert.throws(function() {
+ var badname = "a".repeat(200) + ".socket";
+ testSockOptions(badname, badname);
+});
+
+// Check that if UNIX sockets are disabled that we aren't able to connect over UNIX sockets
+assert.throws(function() {
+ testSockOptions(undefined, undefined, {nounixsocket: ""});
+});
+
+// Check the unixSocketPrefix option
+var socketPrefix = `${MongoRunner.dataDir}/socketdir`;
+mkdir(socketPrefix);
+var port = allocatePort();
+testSockOptions(
+ undefined, `socketdir/mongodb-${port}.sock`, {unixSocketPrefix: socketPrefix, port: port});
+
+port = allocatePort();
+testSockOptions(undefined,
+ `socketdir/mongodb-${port}.sock`,
+ {unixSocketPrefix: socketPrefix, port: port},
+ ',',
+ true);
})();
diff --git a/jstests/noPassthrough/unknown-set-parameter.js b/jstests/noPassthrough/unknown-set-parameter.js
index 1e72694e276..f5e6c2b10b8 100644
--- a/jstests/noPassthrough/unknown-set-parameter.js
+++ b/jstests/noPassthrough/unknown-set-parameter.js
@@ -1,36 +1,35 @@
// Verify error is produced when specifying an invalid set parameter.
(function() {
- 'use strict';
+'use strict';
- function tryRun(arg) {
- // runMongoProgram helpfully makes certain that we pass a port when invoking mongod.
- return runMongoProgram('./mongod', '--port', 0, '--setParameter', arg, '--outputConfig');
- }
+function tryRun(arg) {
+ // runMongoProgram helpfully makes certain that we pass a port when invoking mongod.
+ return runMongoProgram('./mongod', '--port', 0, '--setParameter', arg, '--outputConfig');
+}
- // Positive case, valid setparam.
- clearRawMongoProgramOutput();
- const valid = tryRun('enableTestCommands=1');
- assert.eq(valid, 0);
- const validOutput = rawMongoProgramOutput();
- assert.gte(validOutput.search(/enableTestCommands: 1/), 0, validOutput);
+// Positive case, valid setparam.
+clearRawMongoProgramOutput();
+const valid = tryRun('enableTestCommands=1');
+assert.eq(valid, 0);
+const validOutput = rawMongoProgramOutput();
+assert.gte(validOutput.search(/enableTestCommands: 1/), 0, validOutput);
- // Negative case, invalid setparam.
- clearRawMongoProgramOutput();
- const foo = tryRun('foo=bar');
- assert.neq(foo, 0);
- const fooOutput = rawMongoProgramOutput();
- assert.gte(fooOutput.search(/Unknown --setParameter 'foo'/), 0, fooOutput);
-
- // Negative case, valid but unavailable setparam.
- clearRawMongoProgramOutput();
- const graph = tryRun('roleGraphInvalidationIsFatal=true');
- assert.neq(graph, 0);
- const graphOutput = rawMongoProgramOutput();
- assert.gte(
- graphOutput.search(
- /--setParameter 'roleGraphInvalidationIsFatal' only available when used with 'enableTestCommands'/),
- 0,
- fooOutput);
+// Negative case, invalid setparam.
+clearRawMongoProgramOutput();
+const foo = tryRun('foo=bar');
+assert.neq(foo, 0);
+const fooOutput = rawMongoProgramOutput();
+assert.gte(fooOutput.search(/Unknown --setParameter 'foo'/), 0, fooOutput);
+// Negative case, valid but unavailable setparam.
+clearRawMongoProgramOutput();
+const graph = tryRun('roleGraphInvalidationIsFatal=true');
+assert.neq(graph, 0);
+const graphOutput = rawMongoProgramOutput();
+assert.gte(
+ graphOutput.search(
+ /--setParameter 'roleGraphInvalidationIsFatal' only available when used with 'enableTestCommands'/),
+ 0,
+ fooOutput);
}());
diff --git a/jstests/noPassthrough/unsupported_change_stream_deployments.js b/jstests/noPassthrough/unsupported_change_stream_deployments.js
index 3f24a8b0f2c..c342341da69 100644
--- a/jstests/noPassthrough/unsupported_change_stream_deployments.js
+++ b/jstests/noPassthrough/unsupported_change_stream_deployments.js
@@ -2,60 +2,59 @@
// @tags: [requires_sharding, uses_change_streams]
(function() {
- "use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- load("jstests/libs/feature_compatibility_version.js"); // For checkFCV.
+"use strict";
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+load("jstests/libs/feature_compatibility_version.js"); // For checkFCV.
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- function assertChangeStreamNotSupportedOnConnection(conn) {
- const notReplicaSetErrorCode = 40573;
- assertErrorCode(
- conn.getDB("test").non_existent, [{$changeStream: {}}], notReplicaSetErrorCode);
- assertErrorCode(conn.getDB("test").non_existent,
- [{$changeStream: {fullDocument: "updateLookup"}}],
- notReplicaSetErrorCode);
- }
+function assertChangeStreamNotSupportedOnConnection(conn) {
+ const notReplicaSetErrorCode = 40573;
+ assertErrorCode(conn.getDB("test").non_existent, [{$changeStream: {}}], notReplicaSetErrorCode);
+ assertErrorCode(conn.getDB("test").non_existent,
+ [{$changeStream: {fullDocument: "updateLookup"}}],
+ notReplicaSetErrorCode);
+}
- const conn = MongoRunner.runMongod({enableMajorityReadConcern: ""});
- assert.neq(null, conn, "mongod was unable to start up");
- // $changeStream cannot run on a non-existent database.
- assert.writeOK(conn.getDB("test").ensure_db_exists.insert({}));
- assertChangeStreamNotSupportedOnConnection(conn);
- assert.eq(0, MongoRunner.stopMongod(conn));
+const conn = MongoRunner.runMongod({enableMajorityReadConcern: ""});
+assert.neq(null, conn, "mongod was unable to start up");
+// $changeStream cannot run on a non-existent database.
+assert.writeOK(conn.getDB("test").ensure_db_exists.insert({}));
+assertChangeStreamNotSupportedOnConnection(conn);
+assert.eq(0, MongoRunner.stopMongod(conn));
- // Test a sharded cluster with standalone shards.
- // TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
- const clusterWithStandalones = new ShardingTest({
- shards: 2,
- other: {shardOptions: {enableMajorityReadConcern: ""}},
- config: 1,
- shardAsReplicaSet: false
- });
- // Make sure the database exists before running any commands.
- const mongosDB = clusterWithStandalones.getDB("test");
- // enableSharding will create the db at the cluster level but not on the shards. $changeStream
- // through mongoS will be allowed to run on the shards despite the lack of a database.
- assert.commandWorked(mongosDB.adminCommand({enableSharding: "test"}));
- assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.s);
- // Shard the 'ensure_db_exists' collection on a hashed key before running $changeStream on the
- // shards directly. This will ensure that the database is created on both shards.
- assert.commandWorked(
- mongosDB.adminCommand({shardCollection: "test.ensure_db_exists", key: {_id: "hashed"}}));
- assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.shard0);
- assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.shard1);
- clusterWithStandalones.stop();
+// Test a sharded cluster with standalone shards.
+// TODO: Remove 'shardAsReplicaSet: false' when SERVER-32672 is fixed.
+const clusterWithStandalones = new ShardingTest({
+ shards: 2,
+ other: {shardOptions: {enableMajorityReadConcern: ""}},
+ config: 1,
+ shardAsReplicaSet: false
+});
+// Make sure the database exists before running any commands.
+const mongosDB = clusterWithStandalones.getDB("test");
+// enableSharding will create the db at the cluster level but not on the shards. $changeStream
+// through mongoS will be allowed to run on the shards despite the lack of a database.
+assert.commandWorked(mongosDB.adminCommand({enableSharding: "test"}));
+assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.s);
+// Shard the 'ensure_db_exists' collection on a hashed key before running $changeStream on the
+// shards directly. This will ensure that the database is created on both shards.
+assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: "test.ensure_db_exists", key: {_id: "hashed"}}));
+assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.shard0);
+assertChangeStreamNotSupportedOnConnection(clusterWithStandalones.shard1);
+clusterWithStandalones.stop();
}());
diff --git a/jstests/noPassthrough/update_now_clustertime_replset.js b/jstests/noPassthrough/update_now_clustertime_replset.js
index 65503d7d5c3..fe3db8e4512 100644
--- a/jstests/noPassthrough/update_now_clustertime_replset.js
+++ b/jstests/noPassthrough/update_now_clustertime_replset.js
@@ -8,169 +8,126 @@
* @tags: [requires_find_command, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({name: jsTestName(), nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({name: jsTestName(), nodes: 1});
+rst.startSet();
+rst.initiate();
- const db = rst.getPrimary().getDB(jsTestName());
- const otherColl = db.other;
- const coll = db.test;
- otherColl.drop();
- coll.drop();
+const db = rst.getPrimary().getDB(jsTestName());
+const otherColl = db.other;
+const coll = db.test;
+otherColl.drop();
+coll.drop();
- // Insert N docs, with the _id field set to the current Date. We sleep for a short period
- // between insertions, such that the Date value increases for each successive document.
- let bulk = coll.initializeUnorderedBulkOp();
- const _idStart = new Date();
- const numDocs = 10;
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({_id: new Date(), insertClusterTime: new Timestamp(0, 0)});
- if (i < numDocs - 1) {
- sleep(100);
- }
+// Insert N docs, with the _id field set to the current Date. We sleep for a short period
+// between insertions, such that the Date value increases for each successive document.
+let bulk = coll.initializeUnorderedBulkOp();
+const _idStart = new Date();
+const numDocs = 10;
+for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({_id: new Date(), insertClusterTime: new Timestamp(0, 0)});
+ if (i < numDocs - 1) {
+ sleep(100);
}
- const _idEnd = new Date();
-
- assert.commandWorked(bulk.execute());
-
- // Test that $$NOW and $$CLUSTER_TIME are available and remain constant across all updated
- // documents.
- let writeResult =
- assert.commandWorked(coll.update({$where: "sleep(10); return true"},
- [{$addFields: {now: "$$NOW", ctime: "$$CLUSTER_TIME"}}],
- {multi: true}));
+}
+const _idEnd = new Date();
- assert.eq(writeResult.nMatched, numDocs);
- assert.eq(writeResult.nModified, numDocs);
+assert.commandWorked(bulk.execute());
- let results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now instanceof Date);
- assert(results[0].ctime instanceof Timestamp);
- for (let result of results) {
- assert.eq(result.now, results[0].now);
- assert.eq(result.ctime, results[0].ctime);
- }
+// Test that $$NOW and $$CLUSTER_TIME are available and remain constant across all updated
+// documents.
+let writeResult =
+ assert.commandWorked(coll.update({$where: "sleep(10); return true"},
+ [{$addFields: {now: "$$NOW", ctime: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- // Test that $$NOW and $$CLUSTER_TIME advance between updates but remain constant across all
- // updates in a given batch.
- writeResult = assert.commandWorked(db.runCommand({
- update: coll.getName(),
- updates: [
- {
- q: {$where: "sleep(10); return true"},
- u: [{$addFields: {now2: "$$NOW", ctime2: "$$CLUSTER_TIME"}}],
- multi: true
- },
- {
- q: {$where: "sleep(10); return true"},
- u: [{$addFields: {now3: "$$NOW", ctime3: "$$CLUSTER_TIME"}}],
- multi: true
- }
- ]
- }));
+assert.eq(writeResult.nMatched, numDocs);
+assert.eq(writeResult.nModified, numDocs);
- assert.eq(writeResult.n, numDocs * 2);
- assert.eq(writeResult.nModified, numDocs * 2);
+let results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now instanceof Date);
+assert(results[0].ctime instanceof Timestamp);
+for (let result of results) {
+ assert.eq(result.now, results[0].now);
+ assert.eq(result.ctime, results[0].ctime);
+}
- results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now2 instanceof Date);
- assert(results[0].ctime2 instanceof Timestamp);
- for (let result of results) {
- // The now2 and ctime2 fields are greater than the values from the previous update.
- assert.gt(result.now2, result.now);
- assert.gt(result.ctime2, result.ctime);
- // The now2 and ctime2 fields are the same across all documents.
- assert.eq(result.now2, results[0].now2);
- assert.eq(result.ctime2, results[0].ctime2);
- // The now2 and ctime2 fields are the same as now3 and ctime3 across all documents.
- assert.eq(result.now2, result.now3);
- assert.eq(result.ctime2, result.ctime3);
- }
-
- // Test that $$NOW and $$CLUSTER_TIME can be used in the query portion of an update.
- const _idMidpoint = new Date(_idStart.getTime() + (_idEnd.getTime() - _idStart.getTime()) / 2);
- writeResult =
- assert.commandWorked(coll.update({
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
+// Test that $$NOW and $$CLUSTER_TIME advance between updates but remain constant across all
+// updates in a given batch.
+writeResult = assert.commandWorked(db.runCommand({
+ update: coll.getName(),
+ updates: [
+ {
+ q: {$where: "sleep(10); return true"},
+ u: [{$addFields: {now2: "$$NOW", ctime2: "$$CLUSTER_TIME"}}],
+ multi: true
},
- [{$addFields: {now4: "$$NOW", ctime4: "$$CLUSTER_TIME"}}],
- {multi: true}));
-
- assert.lt(writeResult.nMatched, numDocs);
- assert.lt(writeResult.nModified, numDocs);
-
- results = coll.find().sort({_id: 1}).toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now4 instanceof Date);
- assert(results[0].ctime4 instanceof Timestamp);
- for (let result of results) {
- if (result._id.getTime() < _idMidpoint.getTime()) {
- assert.eq(result.now4, results[0].now4);
- assert.eq(result.ctime4, results[0].ctime4);
- assert.gt(result.now4, result.now3);
- assert.gt(result.ctime4, result.ctime3);
- } else {
- assert.eq(result.now4, undefined);
- assert.eq(result.ctime4, undefined);
+ {
+ q: {$where: "sleep(10); return true"},
+ u: [{$addFields: {now3: "$$NOW", ctime3: "$$CLUSTER_TIME"}}],
+ multi: true
}
- }
+ ]
+}));
- // Test that we can explain() an update command that uses $$NOW and $$CLUSTER_TIME.
- assert.commandWorked(
- coll.explain().update(
- {
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
- },
- [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
- {multi: true}));
+assert.eq(writeResult.n, numDocs * 2);
+assert.eq(writeResult.nModified, numDocs * 2);
- // Test that $$NOW and $$CLUSTER_TIME can be used when issuing updates via the Bulk API, and
- // remain constant across all updates within a single bulk operation.
- // TODO SERVER-41174: Note that if the bulk update operation exceeds the maximum BSON command
- // size, it may issue two or more separate update commands. $$NOW and $$CLUSTER_TIME will be
- // constant within each update command, but not across commands.
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({$where: "sleep(10); return true"}).update([
- {$addFields: {now5: "$$NOW", ctime5: "$$CLUSTER_TIME"}}
- ]);
- bulk.find({$where: "sleep(10); return true"}).update([
- {$addFields: {now6: "$$NOW", ctime6: "$$CLUSTER_TIME"}}
- ]);
- writeResult = assert.commandWorked(bulk.execute());
+results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now2 instanceof Date);
+assert(results[0].ctime2 instanceof Timestamp);
+for (let result of results) {
+ // The now2 and ctime2 fields are greater than the values from the previous update.
+ assert.gt(result.now2, result.now);
+ assert.gt(result.ctime2, result.ctime);
+ // The now2 and ctime2 fields are the same across all documents.
+ assert.eq(result.now2, results[0].now2);
+ assert.eq(result.ctime2, results[0].ctime2);
+ // The now2 and ctime2 fields are the same as now3 and ctime3 across all documents.
+ assert.eq(result.now2, result.now3);
+ assert.eq(result.ctime2, result.ctime3);
+}
- assert.eq(writeResult.nMatched, numDocs * 2);
- assert.eq(writeResult.nModified, numDocs * 2);
+// Test that $$NOW and $$CLUSTER_TIME can be used in the query portion of an update.
+const _idMidpoint = new Date(_idStart.getTime() + (_idEnd.getTime() - _idStart.getTime()) / 2);
+writeResult =
+ assert.commandWorked(coll.update({
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ [{$addFields: {now4: "$$NOW", ctime4: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now5 instanceof Date);
- assert(results[0].ctime5 instanceof Timestamp);
- for (let result of results) {
- // The now5 and ctime5 fields are the same across all documents.
- assert.eq(result.now5, results[0].now5);
- assert.eq(result.ctime5, results[0].ctime5);
- // The now5 and ctime5 fields are the same as now6 and ctime6 across all documents.
- assert.eq(result.now5, result.now6);
- assert.eq(result.ctime5, result.ctime6);
+assert.lt(writeResult.nMatched, numDocs);
+assert.lt(writeResult.nModified, numDocs);
+
+results = coll.find().sort({_id: 1}).toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now4 instanceof Date);
+assert(results[0].ctime4 instanceof Timestamp);
+for (let result of results) {
+ if (result._id.getTime() < _idMidpoint.getTime()) {
+ assert.eq(result.now4, results[0].now4);
+ assert.eq(result.ctime4, results[0].ctime4);
+ assert.gt(result.now4, result.now3);
+ assert.gt(result.ctime4, result.ctime3);
+ } else {
+ assert.eq(result.now4, undefined);
+ assert.eq(result.ctime4, undefined);
}
+}
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify query and update.
- let returnedDoc = coll.findAndModify({
- query: {
+// Test that we can explain() an update command that uses $$NOW and $$CLUSTER_TIME.
+assert.commandWorked(
+ coll.explain().update(
+ {
$expr: {
$and: [
{$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
@@ -178,113 +135,152 @@
]
}
},
- update: [{$addFields: {nowFAM: "$$NOW", ctimeFAM: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- new: true
- });
- assert(returnedDoc.nowFAM instanceof Date);
- assert(returnedDoc.ctimeFAM instanceof Timestamp);
- assert.gt(returnedDoc.nowFAM, returnedDoc.now4);
- assert.gt(returnedDoc.ctimeFAM, returnedDoc.ctime4);
+ [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- results = coll.find({nowFAM: {$exists: true}, ctimeFAM: {$exists: true}}).toArray();
- assert.eq(results.length, 1);
- assert.docEq(results[0], returnedDoc);
+// Test that $$NOW and $$CLUSTER_TIME can be used when issuing updates via the Bulk API, and
+// remain constant across all updates within a single bulk operation.
+// TODO SERVER-41174: Note that if the bulk update operation exceeds the maximum BSON command
+// size, it may issue two or more separate update commands. $$NOW and $$CLUSTER_TIME will be
+// constant within each update command, but not across commands.
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({$where: "sleep(10); return true"}).update([
+ {$addFields: {now5: "$$NOW", ctime5: "$$CLUSTER_TIME"}}
+]);
+bulk.find({$where: "sleep(10); return true"}).update([
+ {$addFields: {now6: "$$NOW", ctime6: "$$CLUSTER_TIME"}}
+]);
+writeResult = assert.commandWorked(bulk.execute());
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify upsert.
- returnedDoc = coll.findAndModify({
- query: {fieldDoesNotExist: {$exists: true}},
- update:
- [{$addFields: {_id: "$$NOW", nowFAMUpsert: "$$NOW", ctimeFAMUpsert: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- upsert: true,
- new: true
- });
- assert(returnedDoc.nowFAMUpsert instanceof Date);
- assert(returnedDoc.ctimeFAMUpsert instanceof Timestamp);
+assert.eq(writeResult.nMatched, numDocs * 2);
+assert.eq(writeResult.nModified, numDocs * 2);
- assert.eq(coll.find().itcount(), numDocs + 1);
- results = coll.find({nowFAMUpsert: {$exists: true}, ctimeFAMUpsert: {$exists: true}}).toArray();
- assert.eq(results.length, 1);
- assert.docEq(results[0], returnedDoc);
+results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now5 instanceof Date);
+assert(results[0].ctime5 instanceof Timestamp);
+for (let result of results) {
+ // The now5 and ctime5 fields are the same across all documents.
+ assert.eq(result.now5, results[0].now5);
+ assert.eq(result.ctime5, results[0].ctime5);
+ // The now5 and ctime5 fields are the same as now6 and ctime6 across all documents.
+ assert.eq(result.now5, result.now6);
+ assert.eq(result.ctime5, result.ctime6);
+}
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify delete.
- returnedDoc = coll.findAndModify({
- query: {
- nowFAMUpsert: {$exists: true},
- ctimeFAMUpsert: {$exists: true},
- $expr: {
- $and: [
- {$lt: ["$nowFAMUpsert", "$$NOW"]},
- {$gt: ["$$CLUSTER_TIME", "$ctimeFAMUpsert"]}
- ]
- }
- },
- sort: {_id: 1},
- remove: true
- });
- assert.eq(coll.find({nowFAMUpsert: {$exists: true}}).itcount(), 0);
- assert.eq(coll.find().itcount(), numDocs);
- assert.neq(returnedDoc, null);
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify query and update.
+let returnedDoc = coll.findAndModify({
+ query: {
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ update: [{$addFields: {nowFAM: "$$NOW", ctimeFAM: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ new: true
+});
+assert(returnedDoc.nowFAM instanceof Date);
+assert(returnedDoc.ctimeFAM instanceof Timestamp);
+assert.gt(returnedDoc.nowFAM, returnedDoc.now4);
+assert.gt(returnedDoc.ctimeFAM, returnedDoc.ctime4);
- // Test that we can explain() a findAndModify command that uses $$NOW and $$CLUSTER_TIME.
- assert.commandWorked(coll.explain().findAndModify({
- query: {
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
- },
- update:
- [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- new: true
- }));
+results = coll.find({nowFAM: {$exists: true}, ctimeFAM: {$exists: true}}).toArray();
+assert.eq(results.length, 1);
+assert.docEq(results[0], returnedDoc);
- // Test that we can use $$NOW and $$CLUSTER_TIME in an update via a $merge aggregation. We first
- // use $merge to copy the current contents of 'coll' into 'otherColl'.
- assert.commandWorked(db.createCollection(otherColl.getName()));
- assert.doesNotThrow(() => coll.aggregate([
- {$merge: {into: otherColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
- ]));
- // Run an aggregation which adds $$NOW and $$CLUSTER_TIME fields into the pipeline document,
- // then do the same to the documents in the output collection via a pipeline update.
- assert.doesNotThrow(() => coll.aggregate([
- {$addFields: {aggNow: "$$NOW", aggCT: "$$CLUSTER_TIME"}},
- {
- $merge: {
- into: otherColl.getName(),
- let : {aggNow: "$aggNow", aggCT: "$aggCT"},
- whenMatched: [{
- $addFields: {
- aggNow: "$$aggNow",
- aggCT: "$$aggCT",
- mergeNow: "$$NOW",
- mergeCT: "$$CLUSTER_TIME"
- }
- }],
- whenNotMatched: "fail"
- }
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify upsert.
+returnedDoc = coll.findAndModify({
+ query: {fieldDoesNotExist: {$exists: true}},
+ update: [{$addFields: {_id: "$$NOW", nowFAMUpsert: "$$NOW", ctimeFAMUpsert: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ upsert: true,
+ new: true
+});
+assert(returnedDoc.nowFAMUpsert instanceof Date);
+assert(returnedDoc.ctimeFAMUpsert instanceof Timestamp);
+
+assert.eq(coll.find().itcount(), numDocs + 1);
+results = coll.find({nowFAMUpsert: {$exists: true}, ctimeFAMUpsert: {$exists: true}}).toArray();
+assert.eq(results.length, 1);
+assert.docEq(results[0], returnedDoc);
+
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify delete.
+returnedDoc = coll.findAndModify({
+ query: {
+ nowFAMUpsert: {$exists: true},
+ ctimeFAMUpsert: {$exists: true},
+ $expr: {
+ $and:
+ [{$lt: ["$nowFAMUpsert", "$$NOW"]}, {$gt: ["$$CLUSTER_TIME", "$ctimeFAMUpsert"]}]
+ }
+ },
+ sort: {_id: 1},
+ remove: true
+});
+assert.eq(coll.find({nowFAMUpsert: {$exists: true}}).itcount(), 0);
+assert.eq(coll.find().itcount(), numDocs);
+assert.neq(returnedDoc, null);
+
+// Test that we can explain() a findAndModify command that uses $$NOW and $$CLUSTER_TIME.
+assert.commandWorked(coll.explain().findAndModify({
+ query: {
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ update: [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ new: true
+}));
+
+// Test that we can use $$NOW and $$CLUSTER_TIME in an update via a $merge aggregation. We first
+// use $merge to copy the current contents of 'coll' into 'otherColl'.
+assert.commandWorked(db.createCollection(otherColl.getName()));
+assert.doesNotThrow(
+ () => coll.aggregate(
+ [{$merge: {into: otherColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]));
+// Run an aggregation which adds $$NOW and $$CLUSTER_TIME fields into the pipeline document,
+// then do the same to the documents in the output collection via a pipeline update.
+assert.doesNotThrow(() => coll.aggregate([
+ {$addFields: {aggNow: "$$NOW", aggCT: "$$CLUSTER_TIME"}},
+ {
+ $merge: {
+ into: otherColl.getName(),
+ let : {aggNow: "$aggNow", aggCT: "$aggCT"},
+ whenMatched: [{
+ $addFields: {
+ aggNow: "$$aggNow",
+ aggCT: "$$aggCT",
+ mergeNow: "$$NOW",
+ mergeCT: "$$CLUSTER_TIME"
+ }
+ }],
+ whenNotMatched: "fail"
}
- ]));
- // Verify that the agg pipeline's $$NOW and $$CLUSTER_TIME match the $merge update pipeline's.
- results = otherColl.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].mergeNow instanceof Date);
- assert(results[0].mergeCT instanceof Timestamp);
- for (let result of results) {
- // The mergeNow and mergeCT fields are greater than the values from the previous updates.
- assert.gt(result.mergeNow, result.now5);
- assert.gt(result.mergeCT, result.ctime5);
- // The mergeNow and mergeCT fields are the same across all documents.
- assert.eq(result.mergeNow, results[0].mergeNow);
- assert.eq(result.mergeCT, results[0].mergeCT);
- // The mergeNow and mergeCT fields are the same as aggNow and aggCT across all documents.
- assert.eq(result.mergeNow, result.aggNow);
- assert.eq(result.mergeCT, result.aggCT);
}
+]));
+// Verify that the agg pipeline's $$NOW and $$CLUSTER_TIME match the $merge update pipeline's.
+results = otherColl.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].mergeNow instanceof Date);
+assert(results[0].mergeCT instanceof Timestamp);
+for (let result of results) {
+ // The mergeNow and mergeCT fields are greater than the values from the previous updates.
+ assert.gt(result.mergeNow, result.now5);
+ assert.gt(result.mergeCT, result.ctime5);
+ // The mergeNow and mergeCT fields are the same across all documents.
+ assert.eq(result.mergeNow, results[0].mergeNow);
+ assert.eq(result.mergeCT, results[0].mergeCT);
+ // The mergeNow and mergeCT fields are the same as aggNow and aggCT across all documents.
+ assert.eq(result.mergeNow, result.aggNow);
+ assert.eq(result.mergeCT, result.aggCT);
+}
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/noPassthrough/update_now_clustertime_sharding.js b/jstests/noPassthrough/update_now_clustertime_sharding.js
index 52bb168c168..4d35fff55dd 100644
--- a/jstests/noPassthrough/update_now_clustertime_sharding.js
+++ b/jstests/noPassthrough/update_now_clustertime_sharding.js
@@ -8,111 +8,144 @@
* @tags: [requires_find_command, requires_sharding]
*/
(function() {
- "use strict";
+"use strict";
- const st = new ShardingTest({name: jsTestName(), mongos: 1, shards: 2, rs: {nodes: 1}});
+const st = new ShardingTest({name: jsTestName(), mongos: 1, shards: 2, rs: {nodes: 1}});
- const db = st.s.getDB(jsTestName());
- const otherColl = db.other;
- const coll = db.test;
- otherColl.drop();
- coll.drop();
+const db = st.s.getDB(jsTestName());
+const otherColl = db.other;
+const coll = db.test;
+otherColl.drop();
+coll.drop();
- // Enable sharding on the test DB and ensure its primary is shard0.
- assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
- st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
+// Enable sharding on the test DB and ensure its primary is shard0.
+assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+st.ensurePrimaryShard(db.getName(), st.shard0.shardName);
- // Create a sharded collection on {shard: 1}, split across the cluster at {shard: 1}. Do this
- // for both 'coll' and 'otherColl' so that the latter can be used for $merge tests later.
- for (let collToShard of[coll, otherColl]) {
- st.shardColl(collToShard, {shard: 1}, {shard: 1}, {shard: 1});
- }
+// Create a sharded collection on {shard: 1}, split across the cluster at {shard: 1}. Do this
+// for both 'coll' and 'otherColl' so that the latter can be used for $merge tests later.
+for (let collToShard of [coll, otherColl]) {
+ st.shardColl(collToShard, {shard: 1}, {shard: 1}, {shard: 1});
+}
- // Insert N docs, with the _id field set to the current Date. Sleep for a short period between
- // insertions, such that the Date value increases for each successive document. We additionally
- // ensure that the insertions alternate between the two shards by setting the shard key to
- // either 0 or 1.
- let bulk = coll.initializeUnorderedBulkOp();
- const _idStart = new Date();
- const numDocs = 10;
- for (let i = 0; i < numDocs; ++i) {
- bulk.insert({_id: new Date(), insertClusterTime: new Timestamp(0, 0), shard: (i % 2)});
- if (i < numDocs - 1) {
- sleep(100);
- }
+// Insert N docs, with the _id field set to the current Date. Sleep for a short period between
+// insertions, such that the Date value increases for each successive document. We additionally
+// ensure that the insertions alternate between the two shards by setting the shard key to
+// either 0 or 1.
+let bulk = coll.initializeUnorderedBulkOp();
+const _idStart = new Date();
+const numDocs = 10;
+for (let i = 0; i < numDocs; ++i) {
+ bulk.insert({_id: new Date(), insertClusterTime: new Timestamp(0, 0), shard: (i % 2)});
+ if (i < numDocs - 1) {
+ sleep(100);
}
- const _idEnd = new Date();
+}
+const _idEnd = new Date();
- assert.commandWorked(bulk.execute());
+assert.commandWorked(bulk.execute());
- // Test that we cannot issue an update to mongoS with runtime constants already present.
- assert.commandFailedWithCode(db.runCommand({
- update: coll.getName(),
- updates: [{q: {}, u: {$set: {operationFailsBeforeApplyingUpdates: true}}}],
- runtimeConstants: {localNow: new Date(), clusterTime: new Timestamp(0, 0)}
- }),
- 51195);
+// Test that we cannot issue an update to mongoS with runtime constants already present.
+assert.commandFailedWithCode(db.runCommand({
+ update: coll.getName(),
+ updates: [{q: {}, u: {$set: {operationFailsBeforeApplyingUpdates: true}}}],
+ runtimeConstants: {localNow: new Date(), clusterTime: new Timestamp(0, 0)}
+}),
+ 51195);
- // Test that $$NOW and $$CLUSTER_TIME are available and remain constant across all updated
- // documents.
- let writeResult =
- assert.commandWorked(coll.update({$where: "sleep(10); return true"},
- [{$addFields: {now: "$$NOW", ctime: "$$CLUSTER_TIME"}}],
- {multi: true}));
+// Test that $$NOW and $$CLUSTER_TIME are available and remain constant across all updated
+// documents.
+let writeResult =
+ assert.commandWorked(coll.update({$where: "sleep(10); return true"},
+ [{$addFields: {now: "$$NOW", ctime: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- assert.eq(writeResult.nMatched, numDocs);
- assert.eq(writeResult.nModified, numDocs);
+assert.eq(writeResult.nMatched, numDocs);
+assert.eq(writeResult.nModified, numDocs);
- let results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now instanceof Date);
- assert(results[0].ctime instanceof Timestamp);
- for (let result of results) {
- assert.eq(result.now, results[0].now);
- assert.eq(result.ctime, results[0].ctime);
- }
+let results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now instanceof Date);
+assert(results[0].ctime instanceof Timestamp);
+for (let result of results) {
+ assert.eq(result.now, results[0].now);
+ assert.eq(result.ctime, results[0].ctime);
+}
- // Test that $$NOW and $$CLUSTER_TIME advance between updates but remain constant across all
- // updates in a given batch.
- writeResult = assert.commandWorked(db.runCommand({
- update: coll.getName(),
- updates: [
- {
- q: {$where: "sleep(10); return true"},
- u: [{$addFields: {now2: "$$NOW", ctime2: "$$CLUSTER_TIME"}}],
- multi: true
- },
- {
- q: {$where: "sleep(10); return true"},
- u: [{$addFields: {now3: "$$NOW", ctime3: "$$CLUSTER_TIME"}}],
- multi: true
- }
- ]
- }));
+// Test that $$NOW and $$CLUSTER_TIME advance between updates but remain constant across all
+// updates in a given batch.
+writeResult = assert.commandWorked(db.runCommand({
+ update: coll.getName(),
+ updates: [
+ {
+ q: {$where: "sleep(10); return true"},
+ u: [{$addFields: {now2: "$$NOW", ctime2: "$$CLUSTER_TIME"}}],
+ multi: true
+ },
+ {
+ q: {$where: "sleep(10); return true"},
+ u: [{$addFields: {now3: "$$NOW", ctime3: "$$CLUSTER_TIME"}}],
+ multi: true
+ }
+ ]
+}));
+
+assert.eq(writeResult.n, numDocs * 2);
+assert.eq(writeResult.nModified, numDocs * 2);
- assert.eq(writeResult.n, numDocs * 2);
- assert.eq(writeResult.nModified, numDocs * 2);
+results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now2 instanceof Date);
+assert(results[0].ctime2 instanceof Timestamp);
+for (let result of results) {
+ // The now2 and ctime2 fields are greater than the values from the previous update.
+ assert.gt(result.now2, result.now);
+ assert.gt(result.ctime2, result.ctime);
+ // The now2 and ctime2 fields are the same across all documents.
+ assert.eq(result.now2, results[0].now2);
+ assert.eq(result.ctime2, results[0].ctime2);
+ // The now2 and ctime2 fields are the same as now3 and ctime3 across all documents.
+ assert.eq(result.now2, result.now3);
+ assert.eq(result.ctime2, result.ctime3);
+}
+
+// Test that $$NOW and $$CLUSTER_TIME can be used in the query portion of an update.
+const _idMidpoint = new Date(_idStart.getTime() + (_idEnd.getTime() - _idStart.getTime()) / 2);
+writeResult =
+ assert.commandWorked(coll.update({
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ [{$addFields: {now4: "$$NOW", ctime4: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now2 instanceof Date);
- assert(results[0].ctime2 instanceof Timestamp);
- for (let result of results) {
- // The now2 and ctime2 fields are greater than the values from the previous update.
- assert.gt(result.now2, result.now);
- assert.gt(result.ctime2, result.ctime);
- // The now2 and ctime2 fields are the same across all documents.
- assert.eq(result.now2, results[0].now2);
- assert.eq(result.ctime2, results[0].ctime2);
- // The now2 and ctime2 fields are the same as now3 and ctime3 across all documents.
- assert.eq(result.now2, result.now3);
- assert.eq(result.ctime2, result.ctime3);
+assert.lt(writeResult.nMatched, numDocs);
+assert.lt(writeResult.nModified, numDocs);
+
+results = coll.find().sort({_id: 1}).toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now4 instanceof Date);
+assert(results[0].ctime4 instanceof Timestamp);
+for (let result of results) {
+ if (result._id.getTime() < _idMidpoint.getTime()) {
+ assert.eq(result.now4, results[0].now4);
+ assert.eq(result.ctime4, results[0].ctime4);
+ assert.gt(result.now4, result.now3);
+ assert.gt(result.ctime4, result.ctime3);
+ } else {
+ assert.eq(result.now4, undefined);
+ assert.eq(result.ctime4, undefined);
}
+}
- // Test that $$NOW and $$CLUSTER_TIME can be used in the query portion of an update.
- const _idMidpoint = new Date(_idStart.getTime() + (_idEnd.getTime() - _idStart.getTime()) / 2);
- writeResult =
- assert.commandWorked(coll.update({
+// Test that we can explain() an update command that uses $$NOW and $$CLUSTER_TIME.
+assert.commandWorked(
+ coll.explain().update(
+ {
$expr: {
$and: [
{$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
@@ -120,200 +153,163 @@
]
}
},
- [{$addFields: {now4: "$$NOW", ctime4: "$$CLUSTER_TIME"}}],
- {multi: true}));
-
- assert.lt(writeResult.nMatched, numDocs);
- assert.lt(writeResult.nModified, numDocs);
-
- results = coll.find().sort({_id: 1}).toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now4 instanceof Date);
- assert(results[0].ctime4 instanceof Timestamp);
- for (let result of results) {
- if (result._id.getTime() < _idMidpoint.getTime()) {
- assert.eq(result.now4, results[0].now4);
- assert.eq(result.ctime4, results[0].ctime4);
- assert.gt(result.now4, result.now3);
- assert.gt(result.ctime4, result.ctime3);
- } else {
- assert.eq(result.now4, undefined);
- assert.eq(result.ctime4, undefined);
- }
- }
-
- // Test that we can explain() an update command that uses $$NOW and $$CLUSTER_TIME.
- assert.commandWorked(
- coll.explain().update(
- {
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
- },
- [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
- {multi: true}));
+ [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
+ {multi: true}));
- // Test that $$NOW and $$CLUSTER_TIME can be used when issuing updates via the Bulk API, and
- // remain constant across all updates within a single bulk operation.
- // TODO SERVER-41174: Note that if the bulk update operation exceeds the maximum BSON command
- // size, it may issue two or more separate update commands. $$NOW and $$CLUSTER_TIME will be
- // constant within each update command, but not across commands.
- bulk = coll.initializeUnorderedBulkOp();
- bulk.find({$where: "sleep(10); return true"}).update([
- {$addFields: {now5: "$$NOW", ctime5: "$$CLUSTER_TIME"}}
- ]);
- bulk.find({$where: "sleep(10); return true"}).update([
- {$addFields: {now6: "$$NOW", ctime6: "$$CLUSTER_TIME"}}
- ]);
- writeResult = assert.commandWorked(bulk.execute());
+// Test that $$NOW and $$CLUSTER_TIME can be used when issuing updates via the Bulk API, and
+// remain constant across all updates within a single bulk operation.
+// TODO SERVER-41174: Note that if the bulk update operation exceeds the maximum BSON command
+// size, it may issue two or more separate update commands. $$NOW and $$CLUSTER_TIME will be
+// constant within each update command, but not across commands.
+bulk = coll.initializeUnorderedBulkOp();
+bulk.find({$where: "sleep(10); return true"}).update([
+ {$addFields: {now5: "$$NOW", ctime5: "$$CLUSTER_TIME"}}
+]);
+bulk.find({$where: "sleep(10); return true"}).update([
+ {$addFields: {now6: "$$NOW", ctime6: "$$CLUSTER_TIME"}}
+]);
+writeResult = assert.commandWorked(bulk.execute());
- assert.eq(writeResult.nMatched, numDocs * 2);
- assert.eq(writeResult.nModified, numDocs * 2);
+assert.eq(writeResult.nMatched, numDocs * 2);
+assert.eq(writeResult.nModified, numDocs * 2);
- results = coll.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].now5 instanceof Date);
- assert(results[0].ctime5 instanceof Timestamp);
- for (let result of results) {
- // The now5 and ctime5 fields are the same across all documents.
- assert.eq(result.now5, results[0].now5);
- assert.eq(result.ctime5, results[0].ctime5);
- // The now5 and ctime5 fields are the same as now6 and ctime6 across all documents.
- assert.eq(result.now5, result.now6);
- assert.eq(result.ctime5, result.ctime6);
- }
+results = coll.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].now5 instanceof Date);
+assert(results[0].ctime5 instanceof Timestamp);
+for (let result of results) {
+ // The now5 and ctime5 fields are the same across all documents.
+ assert.eq(result.now5, results[0].now5);
+ assert.eq(result.ctime5, results[0].ctime5);
+ // The now5 and ctime5 fields are the same as now6 and ctime6 across all documents.
+ assert.eq(result.now5, result.now6);
+ assert.eq(result.ctime5, result.ctime6);
+}
- // Test that we cannot issue a findAndModify to mongoS with runtime constants already present.
- assert.commandFailedWithCode(db.runCommand({
- findAndModify: coll.getName(),
- query: {},
- update: {$set: {operationFailsBeforeApplyingUpdates: true}},
- runtimeConstants: {localNow: new Date(), clusterTime: new Timestamp(0, 0)}
- }),
- 51196);
+// Test that we cannot issue a findAndModify to mongoS with runtime constants already present.
+assert.commandFailedWithCode(db.runCommand({
+ findAndModify: coll.getName(),
+ query: {},
+ update: {$set: {operationFailsBeforeApplyingUpdates: true}},
+ runtimeConstants: {localNow: new Date(), clusterTime: new Timestamp(0, 0)}
+}),
+ 51196);
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify query and update.
- let returnedDoc = coll.findAndModify({
- query: {
- shard: 0,
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
- },
- update: [{$addFields: {nowFAM: "$$NOW", ctimeFAM: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- new: true
- });
- assert(returnedDoc.nowFAM instanceof Date);
- assert(returnedDoc.ctimeFAM instanceof Timestamp);
- assert.gt(returnedDoc.nowFAM, returnedDoc.now4);
- assert.gt(returnedDoc.ctimeFAM, returnedDoc.ctime4);
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify query and update.
+let returnedDoc = coll.findAndModify({
+ query: {
+ shard: 0,
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ update: [{$addFields: {nowFAM: "$$NOW", ctimeFAM: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ new: true
+});
+assert(returnedDoc.nowFAM instanceof Date);
+assert(returnedDoc.ctimeFAM instanceof Timestamp);
+assert.gt(returnedDoc.nowFAM, returnedDoc.now4);
+assert.gt(returnedDoc.ctimeFAM, returnedDoc.ctime4);
- results = coll.find({nowFAM: {$exists: true}, ctimeFAM: {$exists: true}}).toArray();
- assert.eq(results.length, 1);
- assert.docEq(results[0], returnedDoc);
+results = coll.find({nowFAM: {$exists: true}, ctimeFAM: {$exists: true}}).toArray();
+assert.eq(results.length, 1);
+assert.docEq(results[0], returnedDoc);
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify upsert.
- returnedDoc = coll.findAndModify({
- query: {shard: 0, fieldDoesNotExist: {$exists: true}},
- update:
- [{$addFields: {_id: "$$NOW", nowFAMUpsert: "$$NOW", ctimeFAMUpsert: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- upsert: true,
- new: true
- });
- assert(returnedDoc.nowFAMUpsert instanceof Date);
- assert(returnedDoc.ctimeFAMUpsert instanceof Timestamp);
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify upsert.
+returnedDoc = coll.findAndModify({
+ query: {shard: 0, fieldDoesNotExist: {$exists: true}},
+ update: [{$addFields: {_id: "$$NOW", nowFAMUpsert: "$$NOW", ctimeFAMUpsert: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ upsert: true,
+ new: true
+});
+assert(returnedDoc.nowFAMUpsert instanceof Date);
+assert(returnedDoc.ctimeFAMUpsert instanceof Timestamp);
- assert.eq(coll.find().itcount(), numDocs + 1);
- results = coll.find({nowFAMUpsert: {$exists: true}, ctimeFAMUpsert: {$exists: true}}).toArray();
- assert.eq(results.length, 1);
- assert.docEq(results[0], returnedDoc);
+assert.eq(coll.find().itcount(), numDocs + 1);
+results = coll.find({nowFAMUpsert: {$exists: true}, ctimeFAMUpsert: {$exists: true}}).toArray();
+assert.eq(results.length, 1);
+assert.docEq(results[0], returnedDoc);
- // Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify delete.
- returnedDoc = coll.findAndModify({
- query: {
- shard: 0,
- nowFAMUpsert: {$exists: true},
- ctimeFAMUpsert: {$exists: true},
- $expr: {
- $and: [
- {$lt: ["$nowFAMUpsert", "$$NOW"]},
- {$gt: ["$$CLUSTER_TIME", "$ctimeFAMUpsert"]}
- ]
- }
- },
- sort: {_id: 1},
- remove: true
- });
- assert.eq(coll.find({nowFAMUpsert: {$exists: true}}).itcount(), 0);
- assert.eq(coll.find().itcount(), numDocs);
- assert.neq(returnedDoc, null);
+// Test that $$NOW and $$CLUSTER_TIME can be used in a findAndModify delete.
+returnedDoc = coll.findAndModify({
+ query: {
+ shard: 0,
+ nowFAMUpsert: {$exists: true},
+ ctimeFAMUpsert: {$exists: true},
+ $expr: {
+ $and:
+ [{$lt: ["$nowFAMUpsert", "$$NOW"]}, {$gt: ["$$CLUSTER_TIME", "$ctimeFAMUpsert"]}]
+ }
+ },
+ sort: {_id: 1},
+ remove: true
+});
+assert.eq(coll.find({nowFAMUpsert: {$exists: true}}).itcount(), 0);
+assert.eq(coll.find().itcount(), numDocs);
+assert.neq(returnedDoc, null);
- // Test that we can explain() a findAndModify command that uses $$NOW and $$CLUSTER_TIME.
- assert.commandWorked(coll.explain().findAndModify({
- query: {
- shard: 0,
- $expr: {
- $and: [
- {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
- {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
- ]
- }
- },
- update:
- [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
- sort: {_id: 1},
- new: true
- }));
+// Test that we can explain() a findAndModify command that uses $$NOW and $$CLUSTER_TIME.
+assert.commandWorked(coll.explain().findAndModify({
+ query: {
+ shard: 0,
+ $expr: {
+ $and: [
+ {$lt: ["$_id", {$min: [_idMidpoint, "$$NOW"]}]},
+ {$gt: ["$$CLUSTER_TIME", "$insertClusterTime"]}
+ ]
+ }
+ },
+ update: [{$addFields: {explainDoesNotWrite1: "$$NOW", explainDoesNotWrite2: "$$CLUSTER_TIME"}}],
+ sort: {_id: 1},
+ new: true
+}));
- // Test that we can use $$NOW and $$CLUSTER_TIME in an update via a $merge aggregation. We first
- // use $merge to copy the current contents of 'coll' into 'otherColl'.
- assert.doesNotThrow(() => coll.aggregate([
- {$merge: {into: otherColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}
- ]));
- // Run an aggregation which adds $$NOW and $$CLUSTER_TIME fields into the pipeline document,
- // then do the same to the documents in the output collection via a pipeline update.
- assert.doesNotThrow(() => coll.aggregate([
- {$addFields: {aggNow: "$$NOW", aggCT: "$$CLUSTER_TIME"}},
- {
- $merge: {
- into: otherColl.getName(),
- let : {aggNow: "$aggNow", aggCT: "$aggCT"},
- whenMatched: [{
- $addFields: {
- aggNow: "$$aggNow",
- aggCT: "$$aggCT",
- mergeNow: "$$NOW",
- mergeCT: "$$CLUSTER_TIME"
- }
- }],
- whenNotMatched: "fail"
- }
+// Test that we can use $$NOW and $$CLUSTER_TIME in an update via a $merge aggregation. We first
+// use $merge to copy the current contents of 'coll' into 'otherColl'.
+assert.doesNotThrow(
+ () => coll.aggregate(
+ [{$merge: {into: otherColl.getName(), whenMatched: "fail", whenNotMatched: "insert"}}]));
+// Run an aggregation which adds $$NOW and $$CLUSTER_TIME fields into the pipeline document,
+// then do the same to the documents in the output collection via a pipeline update.
+assert.doesNotThrow(() => coll.aggregate([
+ {$addFields: {aggNow: "$$NOW", aggCT: "$$CLUSTER_TIME"}},
+ {
+ $merge: {
+ into: otherColl.getName(),
+ let : {aggNow: "$aggNow", aggCT: "$aggCT"},
+ whenMatched: [{
+ $addFields: {
+ aggNow: "$$aggNow",
+ aggCT: "$$aggCT",
+ mergeNow: "$$NOW",
+ mergeCT: "$$CLUSTER_TIME"
+ }
+ }],
+ whenNotMatched: "fail"
}
- ]));
- // Verify that the agg pipeline's $$NOW and $$CLUSTER_TIME match the $merge update pipeline's.
- results = otherColl.find().toArray();
- assert.eq(results.length, numDocs);
- assert(results[0].mergeNow instanceof Date);
- assert(results[0].mergeCT instanceof Timestamp);
- for (let result of results) {
- // The mergeNow and mergeCT fields are greater than the values from the previous updates.
- assert.gt(result.mergeNow, result.now5);
- assert.gt(result.mergeCT, result.ctime5);
- // The mergeNow and mergeCT fields are the same across all documents.
- assert.eq(result.mergeNow, results[0].mergeNow);
- assert.eq(result.mergeCT, results[0].mergeCT);
- // The mergeNow and mergeCT fields are the same as aggNow and aggCT across all documents.
- assert.eq(result.mergeNow, result.aggNow);
- assert.eq(result.mergeCT, result.aggCT);
}
+]));
+// Verify that the agg pipeline's $$NOW and $$CLUSTER_TIME match the $merge update pipeline's.
+results = otherColl.find().toArray();
+assert.eq(results.length, numDocs);
+assert(results[0].mergeNow instanceof Date);
+assert(results[0].mergeCT instanceof Timestamp);
+for (let result of results) {
+ // The mergeNow and mergeCT fields are greater than the values from the previous updates.
+ assert.gt(result.mergeNow, result.now5);
+ assert.gt(result.mergeCT, result.ctime5);
+ // The mergeNow and mergeCT fields are the same across all documents.
+ assert.eq(result.mergeNow, results[0].mergeNow);
+ assert.eq(result.mergeCT, results[0].mergeCT);
+ // The mergeNow and mergeCT fields are the same as aggNow and aggCT across all documents.
+ assert.eq(result.mergeNow, result.aggNow);
+ assert.eq(result.mergeCT, result.aggCT);
+}
- st.stop();
+st.stop();
}());
diff --git a/jstests/noPassthrough/update_post_image_validation.js b/jstests/noPassthrough/update_post_image_validation.js
index 75d0c4ddfcf..ad78227a09b 100644
--- a/jstests/noPassthrough/update_post_image_validation.js
+++ b/jstests/noPassthrough/update_post_image_validation.js
@@ -1,28 +1,28 @@
// Verify that the update system correctly rejects invalid entries during post-image validation.
(function() {
- "use strict";
+"use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod was unable to start up");
- const testDB = conn.getDB("test");
+const testDB = conn.getDB("test");
- // Test validation of elements added to an array that is represented in a "deserialized" format
- // in mutablebson. The added element is invalid because it is a DBRef with a missing $id.
- assert.writeOK(testDB.coll.insert({_id: 0, a: []}));
- assert.writeErrorWithCode(
- testDB.coll.update({_id: 0}, {$set: {"a.1": 0, "a.0": {$ref: "coll", $db: "test"}}}),
- ErrorCodes.InvalidDBRef);
- assert.docEq(testDB.coll.findOne({_id: 0}), {_id: 0, a: []});
+// Test validation of elements added to an array that is represented in a "deserialized" format
+// in mutablebson. The added element is invalid because it is a DBRef with a missing $id.
+assert.writeOK(testDB.coll.insert({_id: 0, a: []}));
+assert.writeErrorWithCode(
+ testDB.coll.update({_id: 0}, {$set: {"a.1": 0, "a.0": {$ref: "coll", $db: "test"}}}),
+ ErrorCodes.InvalidDBRef);
+assert.docEq(testDB.coll.findOne({_id: 0}), {_id: 0, a: []});
- // Test validation of modified array elements that are accessed using a string that is
- // numerically equivalent to their fieldname. The modified element is invalid because it is a
- // DBRef with a missing $id.
- assert.writeOK(testDB.coll.insert({_id: 1, a: [0]}));
- assert.writeErrorWithCode(
- testDB.coll.update({_id: 1}, {$set: {"a.00": {$ref: "coll", $db: "test"}}}),
- ErrorCodes.InvalidDBRef);
- assert.docEq(testDB.coll.findOne({_id: 1}), {_id: 1, a: [0]});
+// Test validation of modified array elements that are accessed using a string that is
+// numerically equivalent to their fieldname. The modified element is invalid because it is a
+// DBRef with a missing $id.
+assert.writeOK(testDB.coll.insert({_id: 1, a: [0]}));
+assert.writeErrorWithCode(
+ testDB.coll.update({_id: 1}, {$set: {"a.00": {$ref: "coll", $db: "test"}}}),
+ ErrorCodes.InvalidDBRef);
+assert.docEq(testDB.coll.findOne({_id: 1}), {_id: 1, a: [0]});
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
}());
diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js
index c0a8dc4fb2e..dd18a14d72c 100644
--- a/jstests/noPassthrough/update_server-5552.js
+++ b/jstests/noPassthrough/update_server-5552.js
@@ -1,38 +1,38 @@
var db;
(function() {
- "use strict";
- const conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod failed to start.");
- db = conn.getDB("test");
+"use strict";
+const conn = MongoRunner.runMongod();
+assert.neq(null, conn, "mongod failed to start.");
+db = conn.getDB("test");
- const t = db.foo;
- t.drop();
+const t = db.foo;
+t.drop();
- const N = 10000;
+const N = 10000;
- var bulk = t.initializeUnorderedBulkOp();
- for (let i = 0; i < N; i++) {
- bulk.insert({_id: i, x: 1});
- }
- assert.writeOK(bulk.execute());
+var bulk = t.initializeUnorderedBulkOp();
+for (let i = 0; i < N; i++) {
+ bulk.insert({_id: i, x: 1});
+}
+assert.writeOK(bulk.execute());
- const join = startParallelShell(
- "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );");
+const join = startParallelShell(
+ "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );");
- t.update({
- $where: function() {
- sleep(1);
- return true;
- }
- },
- {$set: {x: 5}},
- false,
- true);
- db.getLastError();
+t.update({
+ $where: function() {
+ sleep(1);
+ return true;
+ }
+},
+ {$set: {x: 5}},
+ false,
+ true);
+db.getLastError();
- join();
+join();
- assert.eq(N, t.find({x: 5}).count());
+assert.eq(N, t.find({x: 5}).count());
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
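The four-argument call in this test is the positional form update(query, update, upsert, multi). A short sketch (collection and field names illustrative, not part of this patch) of the equivalent options-document form:

(function() {
"use strict";
const conn = MongoRunner.runMongod();
const t = conn.getDB("test").foo;
assert.writeOK(t.insert({_id: 0, x: 1}));
assert.writeOK(t.insert({_id: 1, x: 1}));
// Same effect as t.update({x: 1}, {$set: {x: 5}}, false, true) in the test above.
assert.writeOK(t.update({x: 1}, {$set: {x: 5}}, {upsert: false, multi: true}));
assert.eq(2, t.find({x: 5}).count());
MongoRunner.stopMongod(conn);
}());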
diff --git a/jstests/noPassthrough/upsert_duplicate_key_retry.js b/jstests/noPassthrough/upsert_duplicate_key_retry.js
index 5841f5a7eb0..c2015642b0e 100644
--- a/jstests/noPassthrough/upsert_duplicate_key_retry.js
+++ b/jstests/noPassthrough/upsert_duplicate_key_retry.js
@@ -10,81 +10,79 @@
*/
(function() {
- "use strict";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const testDB = rst.getPrimary().getDB("test");
- const adminDB = testDB.getSiblingDB("admin");
- const collName = "upsert_duplicate_key_retry";
- const testColl = testDB.getCollection(collName);
-
- testDB.runCommand({drop: collName});
-
- // Queries current operations until 'count' matching operations are found.
- function awaitMatchingCurrentOpCount(message, count) {
- assert.soon(() => {
- const currentOp =
- adminDB.aggregate([{$currentOp: {}}, {$match: {msg: message}}]).toArray();
- return (currentOp.length === count);
- });
- }
-
- function performUpsert() {
- // This function is called from startParallelShell(), so closed-over variables will not be
- // available. We must re-obtain the value of 'testColl' in the function body.
- const testColl = db.getMongo().getDB("test").getCollection("upsert_duplicate_key_retry");
- assert.commandWorked(testColl.update({x: 3}, {$inc: {y: 1}}, {upsert: true}));
- }
-
- assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
-
- // Will hang upsert operations just prior to performing an insert.
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "hangBeforeUpsertPerformsInsert", mode: "alwaysOn"}));
-
- const awaitUpdate1 = startParallelShell(performUpsert, rst.ports[0]);
- const awaitUpdate2 = startParallelShell(performUpsert, rst.ports[0]);
-
- awaitMatchingCurrentOpCount("hangBeforeUpsertPerformsInsert", 2);
-
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "hangBeforeUpsertPerformsInsert", mode: "off"}));
-
- awaitUpdate1();
- awaitUpdate2();
-
- const cursor = testColl.find({}, {_id: 0});
- assert.eq(cursor.next(), {x: 3, y: 2});
- assert(!cursor.hasNext(), cursor.toArray());
-
- // Confirm that oplog entries exist for both insert and update operation.
- const oplogColl = testDB.getSiblingDB("local").getCollection("oplog.rs");
- assert.eq(1, oplogColl.find({"op": "i", "ns": "test.upsert_duplicate_key_retry"}).itcount());
- assert.eq(1, oplogColl.find({"op": "u", "ns": "test.upsert_duplicate_key_retry"}).itcount());
-
- //
- // Confirm DuplicateKey error for cases that should not be retried.
- //
- assert.commandWorked(testDB.runCommand({drop: collName}));
- assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
-
- // DuplicateKey error on replacement-style upsert, where the unique index key value to be
- // written does not match the value of the query predicate.
- assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
- assert.commandWorked(testColl.insert({_id: 1, 'a': 12345}));
- assert.commandFailedWithCode(testColl.update({x: 3}, {}, {upsert: true}),
- ErrorCodes.DuplicateKey);
-
- // DuplicateKey error on update-style upsert, where the unique index key value to be written
- // does not match the value of the query predicate.
- assert.commandWorked(testColl.remove({}));
- assert.commandWorked(testColl.insert({x: 3}));
- assert.commandWorked(testColl.insert({x: 4}));
- assert.commandFailedWithCode(testColl.update({x: 3}, {$inc: {x: 1}}, {upsert: true}),
- ErrorCodes.DuplicateKey);
-
- rst.stopSet();
+"use strict";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const testDB = rst.getPrimary().getDB("test");
+const adminDB = testDB.getSiblingDB("admin");
+const collName = "upsert_duplicate_key_retry";
+const testColl = testDB.getCollection(collName);
+
+testDB.runCommand({drop: collName});
+
+// Queries current operations until 'count' matching operations are found.
+function awaitMatchingCurrentOpCount(message, count) {
+ assert.soon(() => {
+ const currentOp = adminDB.aggregate([{$currentOp: {}}, {$match: {msg: message}}]).toArray();
+ return (currentOp.length === count);
+ });
+}
+
+function performUpsert() {
+ // This function is called from startParallelShell(), so closed-over variables will not be
+ // available. We must re-obtain the value of 'testColl' in the function body.
+ const testColl = db.getMongo().getDB("test").getCollection("upsert_duplicate_key_retry");
+ assert.commandWorked(testColl.update({x: 3}, {$inc: {y: 1}}, {upsert: true}));
+}
+
+assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
+
+// Will hang upsert operations just prior to performing an insert.
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "hangBeforeUpsertPerformsInsert", mode: "alwaysOn"}));
+
+const awaitUpdate1 = startParallelShell(performUpsert, rst.ports[0]);
+const awaitUpdate2 = startParallelShell(performUpsert, rst.ports[0]);
+
+awaitMatchingCurrentOpCount("hangBeforeUpsertPerformsInsert", 2);
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "hangBeforeUpsertPerformsInsert", mode: "off"}));
+
+awaitUpdate1();
+awaitUpdate2();
+
+const cursor = testColl.find({}, {_id: 0});
+assert.eq(cursor.next(), {x: 3, y: 2});
+assert(!cursor.hasNext(), cursor.toArray());
+
+// Confirm that oplog entries exist for both insert and update operation.
+const oplogColl = testDB.getSiblingDB("local").getCollection("oplog.rs");
+assert.eq(1, oplogColl.find({"op": "i", "ns": "test.upsert_duplicate_key_retry"}).itcount());
+assert.eq(1, oplogColl.find({"op": "u", "ns": "test.upsert_duplicate_key_retry"}).itcount());
+
+//
+// Confirm DuplicateKey error for cases that should not be retried.
+//
+assert.commandWorked(testDB.runCommand({drop: collName}));
+assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
+
+// DuplicateKey error on replacement-style upsert, where the unique index key value to be
+// written does not match the value of the query predicate.
+assert.commandWorked(testColl.createIndex({x: 1}, {unique: true}));
+assert.commandWorked(testColl.insert({_id: 1, 'a': 12345}));
+assert.commandFailedWithCode(testColl.update({x: 3}, {}, {upsert: true}), ErrorCodes.DuplicateKey);
+
+// DuplicateKey error on update-style upsert, where the unique index key value to be written
+// does not match the value of the query predicate.
+assert.commandWorked(testColl.remove({}));
+assert.commandWorked(testColl.insert({x: 3}));
+assert.commandWorked(testColl.insert({x: 4}));
+assert.commandFailedWithCode(testColl.update({x: 3}, {$inc: {x: 1}}, {upsert: true}),
+ ErrorCodes.DuplicateKey);
+
+rst.stopSet();
})();
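The test coordinates its two racing upserts by enabling a failpoint and polling $currentOp until both operations are parked on it. Reduced to just the enable/disable lifecycle against a standalone mongod (a sketch; the failpoint name is the one used in the test above):

(function() {
"use strict";
const conn = MongoRunner.runMongod();
const adminDB = conn.getDB("admin");
// Turn the failpoint on, then off again; operations started in between would block at it.
assert.commandWorked(adminDB.runCommand(
    {configureFailPoint: "hangBeforeUpsertPerformsInsert", mode: "alwaysOn"}));
assert.commandWorked(adminDB.runCommand(
    {configureFailPoint: "hangBeforeUpsertPerformsInsert", mode: "off"}));
MongoRunner.stopMongod(conn);
}());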
diff --git a/jstests/noPassthrough/use_disk.js b/jstests/noPassthrough/use_disk.js
index ec7778d23da..7cb10d796ac 100644
--- a/jstests/noPassthrough/use_disk.js
+++ b/jstests/noPassthrough/use_disk.js
@@ -3,150 +3,146 @@
// Confirms that profiled aggregation execution contains expected values for usedDisk.
(function() {
- "use strict";
+"use strict";
- // For getLatestProfilerEntry and getProfilerProtocolStringForCommand
- load("jstests/libs/profiler.js");
- const conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=8"});
- const testDB = conn.getDB("profile_agg");
- const coll = testDB.getCollection("test");
+// For getLatestProfilerEntry and getProfilerProtocolStringForCommand
+load("jstests/libs/profiler.js");
+const conn = MongoRunner.runMongod({setParameter: "maxBSONDepth=8"});
+const testDB = conn.getDB("profile_agg");
+const coll = testDB.getCollection("test");
- testDB.setProfilingLevel(2);
+testDB.setProfilingLevel(2);
- function resetCollection() {
- coll.drop();
- for (var i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
+function resetCollection() {
+ coll.drop();
+ for (var i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
}
- function resetForeignCollection() {
- testDB.foreign.drop();
- const forColl = testDB.getCollection("foreign");
- for (var i = 4; i < 18; i += 2)
- assert.writeOK(forColl.insert({b: i}));
- }
- //
- // Confirm hasSortStage with in-memory sort.
- //
- resetCollection();
- //
- // Confirm 'usedDisk' is not set if 'allowDiskUse' is set but no stages need to use disk.
- //
- coll.aggregate([{$match: {a: {$gte: 2}}}], {allowDiskUse: true});
- var profileObj = getLatestProfilerEntry(testDB);
- assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
-
- resetCollection();
- coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {a: 1}}], {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
- assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
-
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
- assert.eq(8,
- coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {a: 1}}], {allowDiskUse: true})
- .itcount());
- profileObj = getLatestProfilerEntry(testDB);
-
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
- assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
-
- //
- // Confirm that disk use is correctly detected for the $facet stage.
- //
- resetCollection();
- coll.aggregate([{$facet: {"aSort": [{$sortByCount: "$a"}]}}], {allowDiskUse: true});
-
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Confirm that usedDisk is correctly detected for the $group stage.
- //
- resetCollection();
-
- coll.aggregate([{$group: {"_id": {$avg: "$a"}}}], {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
-
- assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalDocumentSourceGroupMaxMemoryBytes: 10}));
- resetCollection();
- coll.aggregate([{$group: {"_id": {$avg: "$a"}}}], {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Confirm that usedDisk is correctly detected for the $lookup stage with a subsequent $unwind.
- //
- resetCollection();
- resetForeignCollection();
- coll.aggregate(
- [
- {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
- {$unwind: "$same"}
- ],
- {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Confirm that usedDisk is correctly detected for the $lookup stage without a subsequent
- // $unwind.
- //
- resetCollection();
- resetForeignCollection();
- coll.aggregate(
- [{$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}}],
- {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Confirm that usedDisk is correctly detected when $limit is set after the $lookup stage.
- //
- resetCollection();
- resetForeignCollection();
- coll.aggregate(
- [
- {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
- {$unwind: "$same"},
- {$limit: 3}
- ],
- {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Confirm that usedDisk is correctly detected when $limit is set before the $lookup stage.
- //
- resetCollection();
- resetForeignCollection();
- coll.aggregate(
- [
- {$limit: 1},
- {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
- {$unwind: "$same"}
- ],
- {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert.eq(profileObj.usedDisk, true, tojson(profileObj));
-
- //
- // Test that usedDisk is not set for a $lookup with a pipeline that does not use disk.
- //
- assert.commandWorked(testDB.adminCommand(
- {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 100 * 1024 * 1024}));
- resetCollection();
- resetForeignCollection();
- coll.aggregate(
- [{
- $lookup:
- {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "otherTest", as: "same"}
- }],
- {allowDiskUse: true});
- profileObj = getLatestProfilerEntry(testDB);
- assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
- MongoRunner.stopMongod(conn);
+}
+function resetForeignCollection() {
+ testDB.foreign.drop();
+ const forColl = testDB.getCollection("foreign");
+ for (var i = 4; i < 18; i += 2)
+ assert.writeOK(forColl.insert({b: i}));
+}
+//
+// Confirm hasSortStage with in-memory sort.
+//
+resetCollection();
+//
+// Confirm 'usedDisk' is not set if 'allowDiskUse' is set but no stages need to use disk.
+//
+coll.aggregate([{$match: {a: {$gte: 2}}}], {allowDiskUse: true});
+var profileObj = getLatestProfilerEntry(testDB);
+assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
+
+resetCollection();
+coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {a: 1}}], {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
+assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
+
+assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
+assert.eq(
+ 8, coll.aggregate([{$match: {a: {$gte: 2}}}, {$sort: {a: 1}}], {allowDiskUse: true}).itcount());
+profileObj = getLatestProfilerEntry(testDB);
+
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+assert.eq(profileObj.hasSortStage, true, tojson(profileObj));
+
+//
+// Confirm that disk use is correctly detected for the $facet stage.
+//
+resetCollection();
+coll.aggregate([{$facet: {"aSort": [{$sortByCount: "$a"}]}}], {allowDiskUse: true});
+
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Confirm that usedDisk is correctly detected for the $group stage.
+//
+resetCollection();
+
+coll.aggregate([{$group: {"_id": {$avg: "$a"}}}], {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
+
+assert.commandWorked(
+ testDB.adminCommand({setParameter: 1, internalDocumentSourceGroupMaxMemoryBytes: 10}));
+resetCollection();
+coll.aggregate([{$group: {"_id": {$avg: "$a"}}}], {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Confirm that usedDisk is correctly detected for the $lookup stage with a subsequent $unwind.
+//
+resetCollection();
+resetForeignCollection();
+coll.aggregate(
+ [
+ {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
+ {$unwind: "$same"}
+ ],
+ {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Confirm that usedDisk is correctly detected for the $lookup stage without a subsequent
+// $unwind.
+//
+resetCollection();
+resetForeignCollection();
+coll.aggregate(
+ [{$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}}],
+ {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Confirm that usedDisk is correctly detected when $limit is set after the $lookup stage.
+//
+resetCollection();
+resetForeignCollection();
+coll.aggregate(
+ [
+ {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
+ {$unwind: "$same"},
+ {$limit: 3}
+ ],
+ {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Confirm that usedDisk is correctly detected when $limit is set before the $lookup stage.
+//
+resetCollection();
+resetForeignCollection();
+coll.aggregate(
+ [
+ {$limit: 1},
+ {$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "foreign", as: "same"}},
+ {$unwind: "$same"}
+ ],
+ {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert.eq(profileObj.usedDisk, true, tojson(profileObj));
+
+//
+// Test that usedDisk is not set for a $lookup with a pipeline that does not use disk.
+//
+assert.commandWorked(testDB.adminCommand(
+ {setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 100 * 1024 * 1024}));
+resetCollection();
+resetForeignCollection();
+coll.aggregate(
+ [{$lookup: {let : {var1: "$a"}, pipeline: [{$sort: {a: 1}}], from: "otherTest", as: "same"}}],
+ {allowDiskUse: true});
+profileObj = getLatestProfilerEntry(testDB);
+assert(!profileObj.hasOwnProperty("usedDisk"), tojson(profileObj));
+MongoRunner.stopMongod(conn);
})();
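Each case above follows the same pattern: shrink an internal memory limit, run the pipeline with allowDiskUse, then read usedDisk from the latest profiler entry. Condensed to the single $sort case as a sketch (the database and collection names here are illustrative):

(function() {
"use strict";
load("jstests/libs/profiler.js");  // For getLatestProfilerEntry(), as in the test above.
const conn = MongoRunner.runMongod();
const testDB = conn.getDB("profile_usedisk_example");
testDB.setProfilingLevel(2);
for (let i = 0; i < 20; ++i) {
    assert.writeOK(testDB.c.insert({a: i}));
}
// Force the blocking sort to spill by shrinking its memory limit, as the test above does.
assert.commandWorked(
    testDB.adminCommand({setParameter: 1, internalDocumentSourceSortMaxBlockingSortBytes: 10}));
testDB.c.aggregate([{$sort: {a: 1}}], {allowDiskUse: true}).itcount();
const profileEntry = getLatestProfilerEntry(testDB);
assert.eq(true, profileEntry.usedDisk, tojson(profileEntry));
MongoRunner.stopMongod(conn);
}());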
diff --git a/jstests/noPassthrough/utf8_paths.js b/jstests/noPassthrough/utf8_paths.js
index 49cb5a63bac..b7b17355457 100644
--- a/jstests/noPassthrough/utf8_paths.js
+++ b/jstests/noPassthrough/utf8_paths.js
@@ -2,36 +2,36 @@
* Test that verifies mongod can start using paths that contain UTF-8 characters that are not ASCII.
*/
(function() {
- 'use strict';
- var db_name = "ελληνικά";
- var path = MongoRunner.dataPath + "Росси́я";
-
- mkdir(path);
-
- // Test MongoD
- let testMongoD = function() {
- let options = {
- dbpath: path,
- useLogFiles: true,
- pidfilepath: path + "/pidfile",
- };
+'use strict';
+var db_name = "ελληνικά";
+var path = MongoRunner.dataPath + "Росси́я";
+
+mkdir(path);
+
+// Test MongoD
+let testMongoD = function() {
+ let options = {
+ dbpath: path,
+ useLogFiles: true,
+ pidfilepath: path + "/pidfile",
+ };
- // directoryperdb is only supported with the wiredTiger storage engine
- if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- options["directoryperdb"] = "";
- }
+ // directoryperdb is only supported with the wiredTiger storage engine
+ if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
+ options["directoryperdb"] = "";
+ }
- let conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, 'mongod was unable to start up');
+ let conn = MongoRunner.runMongod(options);
+ assert.neq(null, conn, 'mongod was unable to start up');
- let coll = conn.getCollection(db_name + ".foo");
- assert.writeOK(coll.insert({_id: 1}));
+ let coll = conn.getCollection(db_name + ".foo");
+ assert.writeOK(coll.insert({_id: 1}));
- MongoRunner.stopMongod(conn);
- };
+ MongoRunner.stopMongod(conn);
+};
- testMongoD();
+testMongoD();
- // Start a second time to test things like log rotation.
- testMongoD();
+// Start a second time to test things like log rotation.
+testMongoD();
})();
diff --git a/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js b/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js
index 8a3c1d0a5ac..4b4cacfbbba 100644
--- a/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js
+++ b/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js
@@ -7,195 +7,191 @@
var db;
(function() {
- "use strict";
-
- load("jstests/libs/feature_compatibility_version.js");
-
- // We skip doing the data consistency checks while terminating the cluster because they conflict
- // with the counts of the number of times the "validate" command is run.
- TestData.skipCollectionAndIndexValidation = true;
-
- function makePatternForValidate(dbName, collName) {
- return new RegExp(
- "COMMAND.*command " + dbName +
- "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" + collName +
- "\"",
- "g");
+"use strict";
+
+load("jstests/libs/feature_compatibility_version.js");
+
+// We skip doing the data consistency checks while terminating the cluster because they conflict
+// with the counts of the number of times the "validate" command is run.
+TestData.skipCollectionAndIndexValidation = true;
+
+function makePatternForValidate(dbName, collName) {
+ return new RegExp("COMMAND.*command " + dbName +
+ "\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" +
+ collName + "\"",
+ "g");
+}
+
+function makePatternForSetFCV(targetVersion) {
+ return new RegExp(
+ "COMMAND.*command.*appName: \"MongoDB Shell\" command: setFeatureCompatibilityVersion" +
+ " { setFeatureCompatibilityVersion: \"" + targetVersion + "\"",
+ "g");
+}
+
+function countMatches(pattern, output) {
+ assert(pattern.global, "the 'g' flag must be used to find all matches");
+
+ let numMatches = 0;
+ while (pattern.exec(output) !== null) {
+ ++numMatches;
}
-
- function makePatternForSetFCV(targetVersion) {
- return new RegExp(
- "COMMAND.*command.*appName: \"MongoDB Shell\" command: setFeatureCompatibilityVersion" +
- " { setFeatureCompatibilityVersion: \"" + targetVersion + "\"",
- "g");
+ return numMatches;
+}
+
+function runValidateHook(testCase) {
+ db = testCase.conn.getDB("test");
+ TestData.forceValidationWithFeatureCompatibilityVersion = latestFCV;
+ try {
+ clearRawMongoProgramOutput();
+
+ load("jstests/hooks/run_validate_collections.js");
+
+ // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
+ // will return all of their output.
+ testCase.teardown();
+ return rawMongoProgramOutput();
+ } finally {
+ db = undefined;
+ TestData.forceValidationWithFeatureCompatibilityVersion = undefined;
}
-
- function countMatches(pattern, output) {
- assert(pattern.global, "the 'g' flag must be used to find all matches");
-
- let numMatches = 0;
- while (pattern.exec(output) !== null) {
- ++numMatches;
+}
+
+function testStandalone(additionalSetupFn, {
+ expectedAtTeardownFCV,
+ expectedSetLastStableFCV: expectedSetLastStableFCV = 0,
+ expectedSetLatestFCV: expectedSetLatestFCV = 0
+} = {}) {
+ const conn =
+ MongoRunner.runMongod({setParameter: {logComponentVerbosity: tojson({command: 1})}});
+ assert.neq(conn, "mongod was unable to start up");
+
+ // Insert a document so the "validate" command has some actual work to do.
+ assert.commandWorked(conn.getDB("test").mycoll.insert({}));
+
+ // Run the additional setup function to put the server into the desired state.
+ additionalSetupFn(conn);
+
+ const output = runValidateHook({
+ conn: conn,
+ teardown: () => {
+ // The validate hook should leave the server with a feature compatibility version of
+ // 'expectedAtTeardownFCV' and no targetVersion.
+ checkFCV(conn.getDB("admin"), expectedAtTeardownFCV);
+ MongoRunner.stopMongod(conn);
}
- return numMatches;
+ });
+
+ const pattern = makePatternForValidate("test", "mycoll");
+ assert.eq(1,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from mongod in the log output");
+
+ for (let [targetVersion, expectedCount] of [[lastStableFCV, expectedSetLastStableFCV],
+ [latestFCV, expectedSetLatestFCV]]) {
+ // Since the additionalSetupFn() function may run the setFeatureCompatibilityVersion
+ // command and we don't have a guarantee those log messages were cleared when
+ // clearRawMongoProgramOutput() was called, we assert 'expectedSetLastStableFCV' and
+ // 'expectedSetLatestFCV' as lower bounds.
+ const pattern = makePatternForSetFCV(targetVersion);
+ assert.lte(expectedCount,
+ countMatches(pattern, output),
+ "expected to find " + tojson(pattern) + " from mongod in the log output");
}
+}
+
+function forceInterruptedUpgradeOrDowngrade(conn, targetVersion) {
+ // We create a separate connection to the server exclusively for running the
+ // setFeatureCompatibilityVersion command so only that operation is ever interrupted by
+ // the checkForInterruptFail failpoint.
+ const setFCVConn = new Mongo(conn.host);
+ const myUriRes = assert.commandWorked(setFCVConn.adminCommand({whatsmyuri: 1}));
+ const myUri = myUriRes.you;
+
+ const curOpRes = assert.commandWorked(setFCVConn.adminCommand({currentOp: 1, client: myUri}));
+ const threadName = curOpRes.inprog[0].desc;
+
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "checkForInterruptFail",
+ mode: "alwaysOn",
+ data: {threadName, chance: 0.05},
+ }));
+
+ let attempts = 0;
+ assert.soon(
+ function() {
+ let res = setFCVConn.adminCommand({setFeatureCompatibilityVersion: targetVersion});
+
+ if (res.ok === 1) {
+ assert.commandWorked(res);
+ } else {
+ assert.commandFailedWithCode(res, ErrorCodes.Interrupted);
+ }
- function runValidateHook(testCase) {
- db = testCase.conn.getDB("test");
- TestData.forceValidationWithFeatureCompatibilityVersion = latestFCV;
- try {
- clearRawMongoProgramOutput();
-
- load("jstests/hooks/run_validate_collections.js");
-
- // We terminate the processes to ensure that the next call to rawMongoProgramOutput()
- // will return all of their output.
- testCase.teardown();
- return rawMongoProgramOutput();
- } finally {
- db = undefined;
- TestData.forceValidationWithFeatureCompatibilityVersion = undefined;
- }
- }
+ ++attempts;
+
+ res = assert.commandWorked(
+ conn.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}));
- function testStandalone(additionalSetupFn, {
- expectedAtTeardownFCV,
- expectedSetLastStableFCV: expectedSetLastStableFCV = 0,
- expectedSetLatestFCV: expectedSetLatestFCV = 0
- } = {}) {
- const conn =
- MongoRunner.runMongod({setParameter: {logComponentVerbosity: tojson({command: 1})}});
- assert.neq(conn, "mongod was unable to start up");
-
- // Insert a document so the "validate" command has some actual work to do.
- assert.commandWorked(conn.getDB("test").mycoll.insert({}));
-
- // Run the additional setup function to put the server into the desired state.
- additionalSetupFn(conn);
-
- const output = runValidateHook({
- conn: conn,
- teardown: () => {
- // The validate hook should leave the server with a feature compatibility version of
- // 'expectedAtTeardownFCV' and no targetVersion.
- checkFCV(conn.getDB("admin"), expectedAtTeardownFCV);
- MongoRunner.stopMongod(conn);
+ if (res.featureCompatibilityVersion.hasOwnProperty("targetVersion")) {
+ checkFCV(conn.getDB("admin"), lastStableFCV, targetVersion);
+ jsTest.log(`Reached partially downgraded state after ${attempts} attempts`);
+ return true;
}
- });
-
- const pattern = makePatternForValidate("test", "mycoll");
- assert.eq(1,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from mongod in the log output");
-
- for (let [targetVersion, expectedCount] of[[lastStableFCV, expectedSetLastStableFCV],
- [latestFCV, expectedSetLatestFCV]]) {
- // Since the additionalSetupFn() function may run the setFeatureCompatibilityVersion
- // command and we don't have a guarantee those log messages were cleared when
- // clearRawMongoProgramOutput() was called, we assert 'expectedSetLastStableFCV' and
- // 'expectedSetLatestFCV' as lower bounds.
- const pattern = makePatternForSetFCV(targetVersion);
- assert.lte(expectedCount,
- countMatches(pattern, output),
- "expected to find " + tojson(pattern) + " from mongod in the log output");
- }
- }
- function forceInterruptedUpgradeOrDowngrade(conn, targetVersion) {
- // We create a separate connection to the server exclusively for running the
- // setFeatureCompatibilityVersion command so only that operation is ever interrupted by
- // the checkForInterruptFail failpoint.
- const setFCVConn = new Mongo(conn.host);
- const myUriRes = assert.commandWorked(setFCVConn.adminCommand({whatsmyuri: 1}));
- const myUri = myUriRes.you;
-
- const curOpRes =
- assert.commandWorked(setFCVConn.adminCommand({currentOp: 1, client: myUri}));
- const threadName = curOpRes.inprog[0].desc;
-
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "checkForInterruptFail",
- mode: "alwaysOn",
- data: {threadName, chance: 0.05},
- }));
-
- let attempts = 0;
- assert.soon(
- function() {
- let res = setFCVConn.adminCommand({setFeatureCompatibilityVersion: targetVersion});
-
- if (res.ok === 1) {
- assert.commandWorked(res);
- } else {
- assert.commandFailedWithCode(res, ErrorCodes.Interrupted);
- }
-
- ++attempts;
-
- res = assert.commandWorked(
- conn.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}));
-
- if (res.featureCompatibilityVersion.hasOwnProperty("targetVersion")) {
- checkFCV(conn.getDB("admin"), lastStableFCV, targetVersion);
- jsTest.log(`Reached partially downgraded state after ${attempts} attempts`);
- return true;
- }
-
- // Either upgrade the feature compatibility version so we can try downgrading again,
- // or downgrade the feature compatibility version so we can try upgrading again.
- // Note that we're using 'conn' rather than 'setFCVConn' to avoid the upgrade being
- // interrupted.
- assert.commandWorked(conn.adminCommand({
- setFeatureCompatibilityVersion: targetVersion === lastStableFCV ? latestFCV
- : lastStableFCV
- }));
- },
- "failed to get featureCompatibilityVersion document into a partially downgraded" +
- " state");
-
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "checkForInterruptFail",
- mode: "off",
- }));
- }
+ // Either upgrade the feature compatibility version so we can try downgrading again,
+ // or downgrade the feature compatibility version so we can try upgrading again.
+ // Note that we're using 'conn' rather than 'setFCVConn' to avoid the upgrade being
+ // interrupted.
+ assert.commandWorked(conn.adminCommand({
+ setFeatureCompatibilityVersion: targetVersion === lastStableFCV ? latestFCV
+ : lastStableFCV
+ }));
+ },
+ "failed to get featureCompatibilityVersion document into a partially downgraded" +
+ " state");
+
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "checkForInterruptFail",
+ mode: "off",
+ }));
+}
+
+(function testStandaloneInLatestFCV() {
+ testStandalone(conn => {
+ checkFCV(conn.getDB("admin"), latestFCV);
+ }, {expectedAtTeardownFCV: latestFCV});
+})();
- (function testStandaloneInLatestFCV() {
- testStandalone(conn => {
- checkFCV(conn.getDB("admin"), latestFCV);
- }, {expectedAtTeardownFCV: latestFCV});
- })();
-
- (function testStandaloneInLastStableFCV() {
- testStandalone(conn => {
- assert.commandWorked(
- conn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(conn.getDB("admin"), lastStableFCV);
- }, {
- expectedAtTeardownFCV: lastStableFCV,
- expectedSetLastStableFCV: 1,
- expectedSetLatestFCV: 1
- });
- })();
-
- (function testStandaloneWithInterruptedFCVDowngrade() {
- testStandalone(conn => {
- forceInterruptedUpgradeOrDowngrade(conn, lastStableFCV);
- }, {
- expectedAtTeardownFCV: lastStableFCV,
- expectedSetLastStableFCV: 2,
- expectedSetLatestFCV: 1
- });
- })();
-
- (function testStandaloneWithInterruptedFCVUpgrade() {
- testStandalone(conn => {
- assert.commandWorked(
- conn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- forceInterruptedUpgradeOrDowngrade(conn, latestFCV);
- }, {
- expectedAtTeardownFCV: lastStableFCV,
- expectedSetLastStableFCV: 1,
- expectedSetLatestFCV: 1
- });
- })();
+(function testStandaloneInLastStableFCV() {
+ testStandalone(conn => {
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ checkFCV(conn.getDB("admin"), lastStableFCV);
+ }, {
+ expectedAtTeardownFCV: lastStableFCV,
+ expectedSetLastStableFCV: 1,
+ expectedSetLatestFCV: 1
+ });
+})();
+
+(function testStandaloneWithInterruptedFCVDowngrade() {
+ testStandalone(conn => {
+ forceInterruptedUpgradeOrDowngrade(conn, lastStableFCV);
+ }, {
+ expectedAtTeardownFCV: lastStableFCV,
+ expectedSetLastStableFCV: 2,
+ expectedSetLatestFCV: 1
+ });
+})();
+
+(function testStandaloneWithInterruptedFCVUpgrade() {
+ testStandalone(conn => {
+ assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+ forceInterruptedUpgradeOrDowngrade(conn, latestFCV);
+ }, {
+ expectedAtTeardownFCV: lastStableFCV,
+ expectedSetLastStableFCV: 1,
+ expectedSetLatestFCV: 1
+ });
+})();
})();
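countMatches() above relies on the statefulness of a global RegExp: each exec() call resumes from lastIndex, so the 'g' flag is what lets the loop advance and terminate. A self-contained illustration of the same helper with a trivial input:

function countMatches(pattern, output) {
    assert(pattern.global, "the 'g' flag must be used to find all matches");
    let numMatches = 0;
    while (pattern.exec(output) !== null) {
        ++numMatches;
    }
    return numMatches;
}
assert.eq(3, countMatches(/validate/g, "validate validate validate"));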
diff --git a/jstests/noPassthrough/verify_session_cache_updates.js b/jstests/noPassthrough/verify_session_cache_updates.js
index 48622ba7b95..47d6068d5cf 100644
--- a/jstests/noPassthrough/verify_session_cache_updates.js
+++ b/jstests/noPassthrough/verify_session_cache_updates.js
@@ -1,76 +1,76 @@
// @tags: [requires_sharding]
(function() {
- 'use strict';
+'use strict';
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- function runTest(conn) {
- for (var i = 0; i < 10; ++i) {
- conn.getDB("test").test.save({a: i});
- }
+function runTest(conn) {
+ for (var i = 0; i < 10; ++i) {
+ conn.getDB("test").test.save({a: i});
+ }
- function verify(conn, nRecords) {
- conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
- assert.eq(nRecords, conn.getDB("config").system.sessions.find({}).count());
- }
+ function verify(conn, nRecords) {
+ conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+ assert.eq(nRecords, conn.getDB("config").system.sessions.find({}).count());
+ }
- function getLastUse(conn) {
- conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
- return conn.getDB("config").system.sessions.findOne({}).lastUse;
- }
+ function getLastUse(conn) {
+ conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+ return conn.getDB("config").system.sessions.findOne({}).lastUse;
+ }
- // initially we have no sessions
- verify(conn, 0);
+ // initially we have no sessions
+ verify(conn, 0);
- // Calling startSession in the shell doesn't initiate the session
- var session = conn.startSession();
- verify(conn, 0);
+ // Calling startSession in the shell doesn't initiate the session
+ var session = conn.startSession();
+ verify(conn, 0);
- // running a command that doesn't require auth does touch
- session.getDatabase("admin").runCommand("isMaster");
- verify(conn, 1);
+ // running a command that doesn't require auth does touch
+ session.getDatabase("admin").runCommand("isMaster");
+ verify(conn, 1);
- // running a session updating command does touch
+ // running a session updating command does touch
+ session.getDatabase("admin").runCommand({serverStatus: 1});
+ verify(conn, 1);
+
+ // running a session updating command updates last use
+ {
+ var lastUse = getLastUse(conn);
+ sleep(200);
session.getDatabase("admin").runCommand({serverStatus: 1});
verify(conn, 1);
-
- // running a session updating command updates last use
- {
- var lastUse = getLastUse(conn);
- sleep(200);
- session.getDatabase("admin").runCommand({serverStatus: 1});
- verify(conn, 1);
- assert.gt(getLastUse(conn), lastUse);
- }
-
- // verify that reading from a cursor updates last use
- {
- var cursor = session.getDatabase("test").test.find({}).batchSize(1);
- cursor.next();
- var lastUse = getLastUse(conn);
- sleep(200);
- verify(conn, 1);
- cursor.next();
- assert.gt(getLastUse(conn), lastUse);
- }
-
- session.endSession();
+ assert.gt(getLastUse(conn), lastUse);
}
+ // verify that reading from a cursor updates last use
{
- var mongod = MongoRunner.runMongod({nojournal: ""});
- runTest(mongod);
- MongoRunner.stopMongod(mongod);
+ var cursor = session.getDatabase("test").test.find({}).batchSize(1);
+ cursor.next();
+ var lastUse = getLastUse(conn);
+ sleep(200);
+ verify(conn, 1);
+ cursor.next();
+ assert.gt(getLastUse(conn), lastUse);
}
- {
- var st = new ShardingTest({shards: 1, mongos: 1, config: 1});
- st.rs0.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+ session.endSession();
+}
- runTest(st.s0);
- st.stop();
- }
+{
+ var mongod = MongoRunner.runMongod({nojournal: ""});
+ runTest(mongod);
+ MongoRunner.stopMongod(mongod);
+}
+
+{
+ var st = new ShardingTest({shards: 1, mongos: 1, config: 1});
+ st.rs0.getPrimary().getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
+
+ runTest(st.s0);
+ st.stop();
+}
})();
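A condensed sketch of the refresh-then-count pattern the helpers above use (implicit sessions are disabled first, as in the test, so only the one explicit session should be recorded; the exact count assumes no other activity on the node):

(function() {
'use strict';
TestData.disableImplicitSessions = true;
const conn = MongoRunner.runMongod();
const session = conn.startSession();
// Any command run on the session materializes a record on the next cache refresh.
session.getDatabase("admin").runCommand({serverStatus: 1});
conn.getDB("admin").runCommand({refreshLogicalSessionCacheNow: 1});
assert.eq(1, conn.getDB("config").system.sessions.find({}).count());
session.endSession();
MongoRunner.stopMongod(conn);
}());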
diff --git a/jstests/noPassthrough/verify_sessions_expiration.js b/jstests/noPassthrough/verify_sessions_expiration.js
index cdf34928772..7940b995253 100644
--- a/jstests/noPassthrough/verify_sessions_expiration.js
+++ b/jstests/noPassthrough/verify_sessions_expiration.js
@@ -14,126 +14,130 @@
// replace it in the config.system.sessions collection.
(function() {
- "use strict";
-
- // This test makes assertions about the number of logical session records.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
-
- const refresh = {refreshLogicalSessionCacheNow: 1};
- const startSession = {startSession: 1};
- const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
-
- function refreshSessionsAndVerifyCount(config, expectedCount) {
- config.runCommand(refresh);
- assert.eq(config.system.sessions.count(), expectedCount);
- }
-
- function getSessions(config) {
- return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
- }
-
- function verifyOpenCursorCount(db, expectedCount) {
- assert.eq(db.serverStatus().metrics.cursor.open.total, expectedCount);
- }
-
- const dbName = "test";
- const testCollName = "verify_sessions_find_get_more";
-
- let conn = MongoRunner.runMongod();
- let db = conn.getDB(dbName);
- let config = conn.getDB("config");
-
- // 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
- for (let i = 0; i < 5; i++) {
- let res = db.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- }
- refreshSessionsAndVerifyCount(config, 5);
-
- // Manually delete entries in config.system.sessions to simulate TTL expiration.
- assert.commandWorked(config.system.sessions.remove({}));
- refreshSessionsAndVerifyCount(config, 0);
-
- // 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
- // config.system.sessions collection.
- for (let i = 0; i < 10; i++) {
- db[testCollName].insert({_id: i, a: i, b: 1});
- }
-
- let cursors = [];
- for (let i = 0; i < 5; i++) {
- let session = db.getMongo().startSession({});
- assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
- "initialize the session");
- cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
- assert(cursors[i].hasNext());
+"use strict";
+
+// This test makes assertions about the number of logical session records.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
+
+const refresh = {
+ refreshLogicalSessionCacheNow: 1
+};
+const startSession = {
+ startSession: 1
+};
+const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+
+function refreshSessionsAndVerifyCount(config, expectedCount) {
+ config.runCommand(refresh);
+ assert.eq(config.system.sessions.count(), expectedCount);
+}
+
+function getSessions(config) {
+ return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
+}
+
+function verifyOpenCursorCount(db, expectedCount) {
+ assert.eq(db.serverStatus().metrics.cursor.open.total, expectedCount);
+}
+
+const dbName = "test";
+const testCollName = "verify_sessions_find_get_more";
+
+let conn = MongoRunner.runMongod();
+let db = conn.getDB(dbName);
+let config = conn.getDB("config");
+
+// 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
+for (let i = 0; i < 5; i++) {
+ let res = db.runCommand(startSession);
+ assert.commandWorked(res, "unable to start session");
+}
+refreshSessionsAndVerifyCount(config, 5);
+
+// Manually delete entries in config.system.sessions to simulate TTL expiration.
+assert.commandWorked(config.system.sessions.remove({}));
+refreshSessionsAndVerifyCount(config, 0);
+
+// 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
+// config.system.sessions collection.
+for (let i = 0; i < 10; i++) {
+ db[testCollName].insert({_id: i, a: i, b: 1});
+}
+
+let cursors = [];
+for (let i = 0; i < 5; i++) {
+ let session = db.getMongo().startSession({});
+ assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
+ "initialize the session");
+ cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
+ assert(cursors[i].hasNext());
+}
+
+refreshSessionsAndVerifyCount(config, 5);
+verifyOpenCursorCount(config, 5);
+
+let sessionsCollectionArray;
+let lastUseValues = [];
+for (let i = 0; i < 3; i++) {
+ for (let j = 0; j < cursors.length; j++) {
+ cursors[j].next();
}
refreshSessionsAndVerifyCount(config, 5);
verifyOpenCursorCount(config, 5);
- let sessionsCollectionArray;
- let lastUseValues = [];
- for (let i = 0; i < 3; i++) {
- for (let j = 0; j < cursors.length; j++) {
- cursors[j].next();
- }
-
- refreshSessionsAndVerifyCount(config, 5);
- verifyOpenCursorCount(config, 5);
-
- sessionsCollectionArray = getSessions(config);
+ sessionsCollectionArray = getSessions(config);
- if (i == 0) {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- lastUseValues.push(sessionsCollectionArray[j].lastUse);
- }
- } else {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
- lastUseValues[j] = sessionsCollectionArray[j].lastUse;
- }
+ if (i == 0) {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ lastUseValues.push(sessionsCollectionArray[j].lastUse);
+ }
+ } else {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
+ lastUseValues[j] = sessionsCollectionArray[j].lastUse;
}
}
-
- // 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
- // cursors.
- assert.commandWorked(config.system.sessions.remove({}));
-
- refreshSessionsAndVerifyCount(config, 0);
- verifyOpenCursorCount(config, 0);
-
- for (let i = 0; i < cursors.length; i++) {
- assert.commandFailedWithCode(
- db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
- }
-
- // 4. Verify that an expired session (simulated by manual deletion) that has a currently running
- // operation will be vivified during the logical session cache refresh.
- let pinnedCursorSession = db.getMongo().startSession();
- withPinnedCursor({
- conn: conn,
- db: pinnedCursorSession.getDatabase(dbName),
- assertFunction: (cursorId, coll) => {
- assert.commandWorked(config.system.sessions.remove({}));
-
- refreshSessionsAndVerifyCount(config, 1);
- verifyOpenCursorCount(config, 1);
-
- let db = coll.getDB();
- assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
- },
- sessionId: pinnedCursorSession,
- runGetMoreFunc: () => {
- assert.commandFailed(
- db.runCommand({getMore: cursorId, collection: collName, lsid: sessionId}));
- },
- failPointName: failPointName,
- });
-
- MongoRunner.stopMongod(conn);
+}
+
+// 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
+// cursors.
+assert.commandWorked(config.system.sessions.remove({}));
+
+refreshSessionsAndVerifyCount(config, 0);
+verifyOpenCursorCount(config, 0);
+
+for (let i = 0; i < cursors.length; i++) {
+ assert.commandFailedWithCode(
+ db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+}
+
+// 4. Verify that an expired session (simulated by manual deletion) that has a currently running
+// operation will be vivified during the logical session cache refresh.
+let pinnedCursorSession = db.getMongo().startSession();
+withPinnedCursor({
+ conn: conn,
+ db: pinnedCursorSession.getDatabase(dbName),
+ assertFunction: (cursorId, coll) => {
+ assert.commandWorked(config.system.sessions.remove({}));
+
+ refreshSessionsAndVerifyCount(config, 1);
+ verifyOpenCursorCount(config, 1);
+
+ let db = coll.getDB();
+ assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
+ },
+ sessionId: pinnedCursorSession,
+ runGetMoreFunc: () => {
+ assert.commandFailed(
+ db.runCommand({getMore: cursorId, collection: collName, lsid: sessionId}));
+ },
+ failPointName: failPointName,
+});
+
+MongoRunner.stopMongod(conn);
})();
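verifyOpenCursorCount() above reads the open-cursor gauge from serverStatus; on its own the lookup is just the following (a sketch against a fresh mongod, where the gauge is expected to start at zero):

(function() {
"use strict";
const conn = MongoRunner.runMongod();
const cursorMetrics = conn.getDB("admin").serverStatus().metrics.cursor;
// No user cursors have been opened yet on this fresh instance.
assert.eq(0, cursorMetrics.open.total);
MongoRunner.stopMongod(conn);
}());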
diff --git a/jstests/noPassthrough/view_catalog_deadlock_with_rename.js b/jstests/noPassthrough/view_catalog_deadlock_with_rename.js
index ec6e6dd107c..ec3aa9d21fd 100644
--- a/jstests/noPassthrough/view_catalog_deadlock_with_rename.js
+++ b/jstests/noPassthrough/view_catalog_deadlock_with_rename.js
@@ -7,30 +7,30 @@
* The fix is to always lock 'system.views' collection in the end.
*/
(function() {
- 'use strict';
+'use strict';
- const conn = MongoRunner.runMongod();
- const db = conn.getDB('test');
+const conn = MongoRunner.runMongod();
+const db = conn.getDB('test');
- assert.commandWorked(db.runCommand({insert: 'a', documents: [{x: 1}]}));
- assert.commandWorked(db.runCommand({insert: 'b', documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: 'a', documents: [{x: 1}]}));
+assert.commandWorked(db.runCommand({insert: 'b', documents: [{x: 1}]}));
- assert.commandWorked(db.createView('viewA', 'a', []));
+assert.commandWorked(db.createView('viewA', 'a', []));
- // Will cause a view catalog reload.
- assert.commandWorked(db.runCommand(
- {insert: 'system.views', documents: [{_id: 'test.viewB', viewOn: 'b', pipeline: []}]}));
+// Will cause a view catalog reload.
+assert.commandWorked(db.runCommand(
+ {insert: 'system.views', documents: [{_id: 'test.viewB', viewOn: 'b', pipeline: []}]}));
- const renameSystemViews = startParallelShell(function() {
- // This used to first lock 'test.system.views' and then 'test.aaabb' in X mode.
- assert.commandWorked(
- db.adminCommand({renameCollection: 'test.system.views', to: 'test.aaabb'}));
- }, conn.port);
+const renameSystemViews = startParallelShell(function() {
+ // This used to first lock 'test.system.views' and then 'test.aaabb' in X mode.
+ assert.commandWorked(
+ db.adminCommand({renameCollection: 'test.system.views', to: 'test.aaabb'}));
+}, conn.port);
- // This triggers view catalog reload. Therefore it first locked 'test.aaabb' in IX mode and then
- // 'test.system.views' in IS mode.
- assert.commandWorked(db.runCommand({delete: 'aaabb', deletes: [{q: {x: 2}, limit: 1}]}));
+// This triggers view catalog reload. Therefore it first locked 'test.aaabb' in IX mode and then
+// 'test.system.views' in IS mode.
+assert.commandWorked(db.runCommand({delete: 'aaabb', deletes: [{q: {x: 2}, limit: 1}]}));
- renameSystemViews();
- MongoRunner.stopMongod(conn);
+renameSystemViews();
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/views_legacy.js b/jstests/noPassthrough/views_legacy.js
index c52aca93bc3..8ded34730ae 100644
--- a/jstests/noPassthrough/views_legacy.js
+++ b/jstests/noPassthrough/views_legacy.js
@@ -3,82 +3,81 @@
* legacy write mode. Also confirms that legacy killCursors execution is successful.
*/
(function() {
- "use strict";
-
- let conn = MongoRunner.runMongod({});
-
- let viewsDB = conn.getDB("views_legacy");
- assert.commandWorked(viewsDB.dropDatabase());
- assert.commandWorked(viewsDB.createView("view", "collection", []));
- let coll = viewsDB.getCollection("collection");
-
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(coll.insert({a: i}));
- }
-
- conn.forceReadMode("legacy");
- conn.forceWriteMode("legacy");
-
- //
- // Legacy getMore is explicitly prohibited on views; you must use the getMore command.
- //
- let cmdRes =
- viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
- assert.commandWorked(cmdRes);
- let cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
-
- let err = assert.throws(function() {
- cursor.itcount();
- }, [], "Legacy getMore expected to fail on a view cursor");
- assert.eq(ErrorCodes.CommandNotSupportedOnView, err.code, tojson(err));
-
- //
- // Legacy killcursors is expected to work on views.
- //
- cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
- assert.commandWorked(cmdRes);
- cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
-
- // When DBCommandCursor is constructed under legacy readMode, cursor.close() will execute a
- // legacy killcursors operation.
- cursor.close();
- assert.gleSuccess(viewsDB, "legacy killcursors expected to work on view cursor");
-
- //
- // A view should reject all write CRUD operations performed in legacy write mode.
- //
- viewsDB.view.insert({x: 1});
- assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
-
- viewsDB.view.remove({x: 1});
- assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
-
- viewsDB.view.update({x: 1}, {x: 2});
- assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
-
- //
- // Legacy find is explicitly prohibited on views; you must use the find command.
- //
- let res = assert.throws(function() {
- viewsDB.view.find({x: 1}).toArray();
- });
- assert.eq(res.code, ErrorCodes.CommandNotSupportedOnView, tojson(res));
-
- // Ensure that legacy getMore succeeds even when a cursor is established on a namespace whose
- // database does not exist. Legacy getMore must check that the cursor is not over a view, and
- // this must handle the case where the namespace is not a view by virtue of the database not
- // existing.
- assert.commandWorked(viewsDB.dropDatabase());
-
- cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
- assert.commandWorked(cmdRes);
- cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
- assert.eq(0, cursor.itcount());
-
- cmdRes = viewsDB.runCommand({aggregate: "view", pipeline: [], cursor: {batchSize: 0}});
- assert.commandWorked(cmdRes);
- cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
- assert.eq(0, cursor.itcount());
-
- MongoRunner.stopMongod(conn);
+"use strict";
+
+let conn = MongoRunner.runMongod({});
+
+let viewsDB = conn.getDB("views_legacy");
+assert.commandWorked(viewsDB.dropDatabase());
+assert.commandWorked(viewsDB.createView("view", "collection", []));
+let coll = viewsDB.getCollection("collection");
+
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(coll.insert({a: i}));
+}
+
+conn.forceReadMode("legacy");
+conn.forceWriteMode("legacy");
+
+//
+// Legacy getMore is explicitly prohibited on views; you must use the getMore command.
+//
+let cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
+assert.commandWorked(cmdRes);
+let cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
+
+let err = assert.throws(function() {
+ cursor.itcount();
+}, [], "Legacy getMore expected to fail on a view cursor");
+assert.eq(ErrorCodes.CommandNotSupportedOnView, err.code, tojson(err));
+
+//
+// Legacy killcursors is expected to work on views.
+//
+cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
+assert.commandWorked(cmdRes);
+cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
+
+// When DBCommandCursor is constructed under legacy readMode, cursor.close() will execute a
+// legacy killcursors operation.
+cursor.close();
+assert.gleSuccess(viewsDB, "legacy killcursors expected to work on view cursor");
+
+//
+// A view should reject all write CRUD operations performed in legacy write mode.
+//
+viewsDB.view.insert({x: 1});
+assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
+
+viewsDB.view.remove({x: 1});
+assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
+
+viewsDB.view.update({x: 1}, {x: 2});
+assert.gleErrorCode(viewsDB, ErrorCodes.CommandNotSupportedOnView);
+
+//
+// Legacy find is explicitly prohibited on views; you must use the find command.
+//
+let res = assert.throws(function() {
+ viewsDB.view.find({x: 1}).toArray();
+});
+assert.eq(res.code, ErrorCodes.CommandNotSupportedOnView, tojson(res));
+
+// Ensure that legacy getMore succeeds even when a cursor is established on a namespace whose
+// database does not exist. Legacy getMore must check that the cursor is not over a view, and
+// this must handle the case where the namespace is not a view by virtue of the database not
+// existing.
+assert.commandWorked(viewsDB.dropDatabase());
+
+cmdRes = viewsDB.runCommand({find: "view", filter: {a: {$gt: 0}}, sort: {a: 1}, batchSize: 0});
+assert.commandWorked(cmdRes);
+cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
+assert.eq(0, cursor.itcount());
+
+cmdRes = viewsDB.runCommand({aggregate: "view", pipeline: [], cursor: {batchSize: 0}});
+assert.commandWorked(cmdRes);
+cursor = new DBCommandCursor(viewsDB, cmdRes, 2);
+assert.eq(0, cursor.itcount());
+
+MongoRunner.stopMongod(conn);
}());
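forceReadMode()/forceWriteMode() above switch the connection onto the legacy wire-protocol paths; they can be switched back the same way. A sketch, assuming the shell's readMode()/writeMode() getters (not exercised in the test above):

(function() {
"use strict";
const conn = MongoRunner.runMongod();
conn.forceReadMode("legacy");
conn.forceWriteMode("legacy");
assert.eq("legacy", conn.readMode());
assert.eq("legacy", conn.writeMode());
// Switch back to the command protocol before reusing the connection elsewhere.
conn.forceReadMode("commands");
conn.forceWriteMode("commands");
MongoRunner.stopMongod(conn);
}());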
diff --git a/jstests/noPassthrough/wiredTigerMaxCacheOverflowSizeGB_serverParameter.js b/jstests/noPassthrough/wiredTigerMaxCacheOverflowSizeGB_serverParameter.js
index f048f2cbf04..43eec0690f9 100644
--- a/jstests/noPassthrough/wiredTigerMaxCacheOverflowSizeGB_serverParameter.js
+++ b/jstests/noPassthrough/wiredTigerMaxCacheOverflowSizeGB_serverParameter.js
@@ -5,18 +5,18 @@
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/noPassthrough/libs/server_parameter_helpers.js");
+load("jstests/noPassthrough/libs/server_parameter_helpers.js");
- // Valid parameter values are in the range [0.1, infinity) or 0 (unbounded).
- testNumericServerParameter("wiredTigerMaxCacheOverflowSizeGB",
- false /*isStartupParameter*/,
- true /*isRuntimeParameter*/,
- 0 /*defaultValue*/,
- 0.1 /*nonDefaultValidValue*/,
- true /*hasLowerBound*/,
- 0.09 /*lowerOutOfBounds*/,
- false /*hasUpperBound*/,
- "unused" /*upperOutOfBounds*/);
+// Valid parameter values are in the range [0.1, infinity) or 0 (unbounded).
+testNumericServerParameter("wiredTigerMaxCacheOverflowSizeGB",
+ false /*isStartupParameter*/,
+ true /*isRuntimeParameter*/,
+ 0 /*defaultValue*/,
+ 0.1 /*nonDefaultValidValue*/,
+ true /*hasLowerBound*/,
+ 0.09 /*lowerOutOfBounds*/,
+ false /*hasUpperBound*/,
+ "unused" /*upperOutOfBounds*/);
})();
diff --git a/jstests/noPassthrough/write_conflict_wildcard.js b/jstests/noPassthrough/write_conflict_wildcard.js
index f5662cdf119..ae2fcca5fc4 100644
--- a/jstests/noPassthrough/write_conflict_wildcard.js
+++ b/jstests/noPassthrough/write_conflict_wildcard.js
@@ -3,37 +3,35 @@
* interacting with the storage layer to retrieve multikey paths.
*/
(function() {
- "strict";
+"strict";
- const conn = MongoRunner.runMongod();
- const testDB = conn.getDB("test");
+const conn = MongoRunner.runMongod();
+const testDB = conn.getDB("test");
- const coll = testDB.write_conflict_wildcard;
- coll.drop();
+const coll = testDB.write_conflict_wildcard;
+coll.drop();
- assert.commandWorked(coll.createIndex({"$**": 1}));
+assert.commandWorked(coll.createIndex({"$**": 1}));
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: 'WTWriteConflictExceptionForReads',
- mode: {activationProbability: 0.01}
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'WTWriteConflictExceptionForReads', mode: {activationProbability: 0.01}}));
+for (let i = 0; i < 1000; ++i) {
+ // Insert documents with a couple different multikey paths to increase the number of records
+ // scanned during multikey path computation in the wildcard index.
+ assert.commandWorked(coll.insert({
+ _id: i,
+ i: i,
+ a: [{x: i - 1}, {x: i}, {x: i + 1}],
+ b: [],
+ longerName: [{nested: [1, 2]}, {nested: 4}]
}));
- for (let i = 0; i < 1000; ++i) {
- // Insert documents with a couple different multikey paths to increase the number of records
- // scanned during multikey path computation in the wildcard index.
- assert.commandWorked(coll.insert({
- _id: i,
- i: i,
- a: [{x: i - 1}, {x: i}, {x: i + 1}],
- b: [],
- longerName: [{nested: [1, 2]}, {nested: 4}]
- }));
- assert.eq(coll.find({i: i}).hint({"$**": 1}).itcount(), 1);
- if (i > 0) {
- assert.eq(coll.find({"a.x": i}).hint({"$**": 1}).itcount(), 2);
- }
+ assert.eq(coll.find({i: i}).hint({"$**": 1}).itcount(), 1);
+ if (i > 0) {
+ assert.eq(coll.find({"a.x": i}).hint({"$**": 1}).itcount(), 2);
}
+}
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'WTWriteConflictExceptionForReads', mode: "off"}));
- MongoRunner.stopMongod(conn);
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'WTWriteConflictExceptionForReads', mode: "off"}));
+MongoRunner.stopMongod(conn);
})();
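
The test above leans on the 'WTWriteConflictExceptionForReads' fail point firing with low probability while multikey paths are recomputed for the wildcard index. The enable/exercise/disable cycle it uses generalizes to the following sketch, assuming testDB is a database handle on a running mongod:

    // Arm the fail point so roughly 1% of eligible reads throw a WriteConflict,
    // which the server is expected to retry internally.
    assert.commandWorked(testDB.adminCommand({
        configureFailPoint: 'WTWriteConflictExceptionForReads',
        mode: {activationProbability: 0.01}
    }));
    // ... run reads and writes that must survive the injected conflicts ...
    // Disarm the fail point before shutting down.
    assert.commandWorked(testDB.adminCommand(
        {configureFailPoint: 'WTWriteConflictExceptionForReads', mode: 'off'}));
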
diff --git a/jstests/noPassthrough/write_local.js b/jstests/noPassthrough/write_local.js
index 6eff980d4a9..252baf3d871 100644
--- a/jstests/noPassthrough/write_local.js
+++ b/jstests/noPassthrough/write_local.js
@@ -1,46 +1,45 @@
// SERVER-22011: Deadlock in ticket distribution
// @tags: [requires_replication, requires_capped]
(function() {
- 'use strict';
+'use strict';
- // Limit concurrent WiredTiger transactions to maximize locking issues, harmless for other SEs.
- var options = {verbose: 1};
+// Limit concurrent WiredTiger transactions to maximize locking issues, harmless for other SEs.
+var options = {verbose: 1};
- // Create a new single node replicaSet
- var replTest =
- new ReplSetTest({name: "write_local", nodes: 1, oplogSize: 1, nodeOptions: options});
- replTest.startSet();
- replTest.initiate();
- var mongod = replTest.getPrimary();
- mongod.adminCommand({setParameter: 1, wiredTigerConcurrentWriteTransactions: 1});
+// Create a new single node replicaSet
+var replTest = new ReplSetTest({name: "write_local", nodes: 1, oplogSize: 1, nodeOptions: options});
+replTest.startSet();
+replTest.initiate();
+var mongod = replTest.getPrimary();
+mongod.adminCommand({setParameter: 1, wiredTigerConcurrentWriteTransactions: 1});
- var local = mongod.getDB('local');
+var local = mongod.getDB('local');
- // Start inserting documents in test.capped and local.capped capped collections.
- var shells = ['test', 'local'].map(function(dbname) {
- var mydb = local.getSiblingDB(dbname);
- mydb.capped.drop();
- mydb.createCollection('capped', {capped: true, size: 20 * 1000});
- return startParallelShell('var mydb=db.getSiblingDB("' + dbname + '"); ' +
- '(function() { ' +
- ' for(var i=0; i < 10*1000; i++) { ' +
- ' mydb.capped.insert({ x: i }); ' +
- ' } ' +
- '})();',
- mongod.port);
- });
+// Start inserting documents in test.capped and local.capped capped collections.
+var shells = ['test', 'local'].map(function(dbname) {
+ var mydb = local.getSiblingDB(dbname);
+ mydb.capped.drop();
+ mydb.createCollection('capped', {capped: true, size: 20 * 1000});
+ return startParallelShell('var mydb=db.getSiblingDB("' + dbname + '"); ' +
+ '(function() { ' +
+ ' for(var i=0; i < 10*1000; i++) { ' +
+ ' mydb.capped.insert({ x: i }); ' +
+ ' } ' +
+ '})();',
+ mongod.port);
+});
- // The following causes inconsistent locking order in the ticket system, depending on
- // timeouts to avoid deadlock.
- var oldObjects = 0;
- for (var i = 0; i < 1000; i++) {
- print(local.stats().objects);
- sleep(1);
- }
+// The following causes inconsistent locking order in the ticket system, depending on
+// timeouts to avoid deadlock.
+var oldObjects = 0;
+for (var i = 0; i < 1000; i++) {
+ print(local.stats().objects);
+ sleep(1);
+}
- // Wait for parallel shells to terminate and stop our replset.
- shells.forEach((function(f) {
- f();
- }));
- replTest.stopSet();
+// Wait for parallel shells to terminate and stop our replset.
+shells.forEach((function(f) {
+ f();
+}));
+replTest.stopSet();
}());
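
write_local.js depends on startParallelShell returning a join function: the writer body is passed as a string, runs in its own shell against the given port, and the returned function blocks until that shell exits. A stripped-down version of the pattern, assuming mongod is a live connection:

    // The string body runs in a separate shell process against mongod.port.
    var awaitWriter = startParallelShell(
        'for (var i = 0; i < 100; i++) { db.capped.insert({x: i}); }', mongod.port);
    // ... work that races with the parallel writer goes here ...
    awaitWriter();  // joins the shell and asserts that it exited cleanly
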
diff --git a/jstests/noPassthrough/wt_cache_full.js b/jstests/noPassthrough/wt_cache_full.js
index a5f08aa0815..29be77da891 100644
--- a/jstests/noPassthrough/wt_cache_full.js
+++ b/jstests/noPassthrough/wt_cache_full.js
@@ -3,63 +3,62 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- const rst = new ReplSetTest({
- nodes: [
- {
- slowms: 30000, // Don't log slow operations on primary.
+const rst = new ReplSetTest({
+ nodes: [
+ {
+ slowms: 30000, // Don't log slow operations on primary.
+ },
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- // Constrain the storage engine cache size to make it easier to fill it up with
- // unflushed modifications.
- wiredTigerCacheSizeGB: 1,
- },
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ // Constrain the storage engine cache size to make it easier to fill it up with
+ // unflushed modifications.
+ wiredTigerCacheSizeGB: 1,
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('t');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('t');
- const numDocs = 2;
- const minDocSizeMB = 10;
+const numDocs = 2;
+const minDocSizeMB = 10;
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
- coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
- {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
+for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
+ {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+}
+assert.eq(numDocs, coll.find().itcount());
- const numUpdates = 500;
- const secondary = rst.getSecondary();
- const batchOpsLimit =
- assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
- .replBatchLimitOperations;
- jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' +
- batchOpsLimit + ' operations per batch.');
+const numUpdates = 500;
+const secondary = rst.getSecondary();
+const batchOpsLimit =
+ assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
+ .replBatchLimitOperations;
+jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' + batchOpsLimit +
+ ' operations per batch.');
- jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
- for (let i = 0; i < numDocs; ++i) {
- for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
- }
+jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+for (let i = 0; i < numDocs; ++i) {
+ for (let j = 0; j < numUpdates; ++j) {
+ assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
}
+}
- jsTestLog('Applying updates on secondary ' + secondary.host);
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- rst.awaitReplication();
+jsTestLog('Applying updates on secondary ' + secondary.host);
+assert.commandWorked(secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+rst.awaitReplication();
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js b/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
index 6c438f7e79e..71383f91454 100644
--- a/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
+++ b/jstests/noPassthrough/wt_cache_full_multi_oplog_txns.js
@@ -3,71 +3,71 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- const rst = new ReplSetTest({
- nodes: [
- {
- slowms: 30000, // Don't log slow operations on primary.
- },
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
+const rst = new ReplSetTest({
+ nodes: [
+ {
+ slowms: 30000, // Don't log slow operations on primary.
+ },
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- ],
- nodeOptions: {
- // Constrain the storage engine cache size to make it easier to fill it up with
- // unflushed modifications.
- wiredTigerCacheSizeGB: 1,
},
- });
- const nodes = rst.startSet();
- rst.initiate();
+ ],
+ nodeOptions: {
+ // Constrain the storage engine cache size to make it easier to fill it up with
+ // unflushed modifications.
+ wiredTigerCacheSizeGB: 1,
+ },
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('t');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('t');
- const numDocs = 2;
- const minDocSizeMB = 10;
+const numDocs = 2;
+const minDocSizeMB = 10;
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
- coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
- {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
+for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
+ {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+}
+assert.eq(numDocs, coll.find().itcount());
- const numUpdates = 500;
- const secondary = rst.getSecondary();
- const batchOpsLimit =
- assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
- .replBatchLimitOperations;
- jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' +
- batchOpsLimit + ' operations per batch.');
+const numUpdates = 500;
+const secondary = rst.getSecondary();
+const batchOpsLimit =
+ assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
+ .replBatchLimitOperations;
+jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' + batchOpsLimit +
+ ' operations per batch.');
- jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
- const session = primary.startSession();
- const sessionDB = session.getDatabase(mydb.getName());
- const sessionColl = sessionDB.getCollection(coll.getName());
- session.startTransaction();
- for (let i = 0; i < numDocs; ++i) {
- for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(sessionColl.update({_id: i}, {$inc: {i: 1}}));
- }
+jsTestLog('Buffering ' + numUpdates + ' updates to ' + numDocs + ' documents on secondary.');
+const session = primary.startSession();
+const sessionDB = session.getDatabase(mydb.getName());
+const sessionColl = sessionDB.getCollection(coll.getName());
+session.startTransaction();
+for (let i = 0; i < numDocs; ++i) {
+ for (let j = 0; j < numUpdates; ++j) {
+ assert.writeOK(sessionColl.update({_id: i}, {$inc: {i: 1}}));
}
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
+}
+assert.commandWorked(session.commitTransaction_forTesting());
+session.endSession();
- jsTestLog('Applying updates on secondary ' + secondary.host);
+jsTestLog('Applying updates on secondary ' + secondary.host);
- // If the secondary is unable to apply all the operations in the unprepared transaction within
- // a single batch with the constrained cache settings, the replica set will not reach a stable
- // state.
- rst.awaitReplication();
+// If the secondary is unable to apply all the operations in the unprepared transaction within
+// a single batch with the constrained cache settings, the replica set will not reach a stable
+// state.
+rst.awaitReplication();
- rst.stopSet();
+rst.stopSet();
})();
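
The update burst in wt_cache_full_multi_oplog_txns.js is wrapped in a single unprepared transaction, so the secondary has to apply it as one unit despite the 1 GB cache. The session bookkeeping reduces to this skeleton, with primary assumed to be the replica set primary connection:

    const session = primary.startSession();
    const sessionColl = session.getDatabase('test').getCollection('t');
    session.startTransaction();
    // Every write issued through sessionColl now belongs to the same transaction.
    assert.writeOK(sessionColl.update({_id: 0}, {$inc: {i: 1}}));
    // commitTransaction_forTesting() returns the commit result as a command
    // response instead of throwing, so it composes with assert.commandWorked().
    assert.commandWorked(session.commitTransaction_forTesting());
    session.endSession();
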
diff --git a/jstests/noPassthrough/wt_cache_full_restart.js b/jstests/noPassthrough/wt_cache_full_restart.js
index 5ee7fa9c935..29aed83c67f 100644
--- a/jstests/noPassthrough/wt_cache_full_restart.js
+++ b/jstests/noPassthrough/wt_cache_full_restart.js
@@ -3,68 +3,68 @@
* @tags: [requires_replication, requires_persistence, requires_wiredtiger]
*/
(function() {
- 'use strict';
+'use strict';
- const rst = new ReplSetTest({
- nodes: [
- {
- slowms: 30000, // Don't log slow operations on primary.
+const rst = new ReplSetTest({
+ nodes: [
+ {
+ slowms: 30000, // Don't log slow operations on primary.
+ },
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- },
- // Constrain the storage engine cache size to make it easier to fill it up with
- // unflushed modifications.
- wiredTigerCacheSizeGB: 1,
- },
- ]
- });
- const nodes = rst.startSet();
- rst.initiate();
+ // Constrain the storage engine cache size to make it easier to fill it up with
+ // unflushed modifications.
+ wiredTigerCacheSizeGB: 1,
+ },
+ ]
+});
+const nodes = rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const mydb = primary.getDB('test');
- const coll = mydb.getCollection('t');
+const primary = rst.getPrimary();
+const mydb = primary.getDB('test');
+const coll = mydb.getCollection('t');
- const numDocs = 2;
- const minDocSizeMB = 10;
+const numDocs = 2;
+const minDocSizeMB = 10;
- for (let i = 0; i < numDocs; ++i) {
- assert.writeOK(
- coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
- {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- }
- assert.eq(numDocs, coll.find().itcount());
+for (let i = 0; i < numDocs; ++i) {
+ assert.writeOK(
+ coll.save({_id: i, i: 0, x: 'x'.repeat(minDocSizeMB * 1024 * 1024)},
+ {writeConcern: {w: nodes.length, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+}
+assert.eq(numDocs, coll.find().itcount());
- const numUpdates = 500;
- let secondary = rst.getSecondary();
- const batchOpsLimit =
- assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
- .replBatchLimitOperations;
- jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' +
- batchOpsLimit + ' operations per batch.');
+const numUpdates = 500;
+let secondary = rst.getSecondary();
+const batchOpsLimit =
+ assert.commandWorked(secondary.adminCommand({getParameter: 1, replBatchLimitOperations: 1}))
+ .replBatchLimitOperations;
+jsTestLog('Oplog application on secondary ' + secondary.host + ' is limited to ' + batchOpsLimit +
+ ' operations per batch.');
- jsTestLog('Stopping secondary ' + secondary.host + '.');
- rst.stop(1);
- jsTestLog('Stopped secondary. Writing ' + numUpdates + ' updates to ' + numDocs +
- ' documents on primary ' + primary.host + '.');
- const startTime = Date.now();
- for (let i = 0; i < numDocs; ++i) {
- for (let j = 0; j < numUpdates; ++j) {
- assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
- }
+jsTestLog('Stopping secondary ' + secondary.host + '.');
+rst.stop(1);
+jsTestLog('Stopped secondary. Writing ' + numUpdates + ' updates to ' + numDocs +
+ ' documents on primary ' + primary.host + '.');
+const startTime = Date.now();
+for (let i = 0; i < numDocs; ++i) {
+ for (let j = 0; j < numUpdates; ++j) {
+ assert.writeOK(coll.update({_id: i}, {$inc: {i: 1}}));
}
- const totalTime = Date.now() - startTime;
- jsTestLog('Wrote ' + numUpdates + ' updates to ' + numDocs + ' documents on primary ' +
- primary.host + '. Elapsed: ' + totalTime + ' ms.');
+}
+const totalTime = Date.now() - startTime;
+jsTestLog('Wrote ' + numUpdates + ' updates to ' + numDocs + ' documents on primary ' +
+ primary.host + '. Elapsed: ' + totalTime + ' ms.');
- secondary = rst.restart(1);
- jsTestLog('Restarted secondary ' + secondary.host +
- '. Waiting for secondary to apply updates from primary.');
- rst.awaitReplication();
+secondary = rst.restart(1);
+jsTestLog('Restarted secondary ' + secondary.host +
+ '. Waiting for secondary to apply updates from primary.');
+rst.awaitReplication();
- rst.stopSet();
+rst.stopSet();
})();
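
wt_cache_full_restart.js buffers the same workload by taking the secondary offline rather than by stalling oplog application with a fail point; the stop/write/restart/await cycle is the core of the test and, assuming rst is an initiated ReplSetTest and coll lives on the primary, reduces to:

    rst.stop(1);                                             // take the secondary offline
    assert.writeOK(coll.update({_id: 0}, {$inc: {i: 1}}));   // updates accumulate in the primary's oplog
    const secondary = rst.restart(1);                        // same node, same data files
    rst.awaitReplication();  // the secondary must drain the backlog within its 1 GB cache
    rst.stopSet();
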
diff --git a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
index ae7f6eac4af..3470a04e24b 100644
--- a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
+++ b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
@@ -8,68 +8,68 @@
* @tags: [resource_intensive]
*/
(function() {
- "use strict";
+"use strict";
- // Skip this test if running with --nojournal and WiredTiger.
- if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
- }
+// Skip this test if running with --nojournal and WiredTiger.
+if (jsTest.options().noJournal &&
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
+ print("Skipping test because running WiredTiger without journaling isn't a valid" +
+ " replica set configuration");
+ return;
+}
- // Skip db hash check because delayed secondary will not catch up to primary.
- TestData.skipCheckDBHashes = true;
+// Skip db hash check because delayed secondary will not catch up to primary.
+TestData.skipCheckDBHashes = true;
- // Skip this test if not running with the "wiredTiger" storage engine.
- var storageEngine = jsTest.options().storageEngine || "wiredTiger";
- if (storageEngine !== "wiredTiger") {
- print('Skipping test because storageEngine is not "wiredTiger"');
- return;
- } else if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
- // Readers of old data, such as a lagged secondary, can lead to stalls when using
- // WiredTiger's LSM tree.
- print("WT-3742: Skipping test because we're running with WiredTiger's LSM tree");
- return;
- } else {
- var rst = new ReplSetTest({
- nodes: 2,
- // We are going to insert at least 100 MB of data with a long slave
- // delay. Configure an appropriately large oplog size.
- oplogSize: 200,
- });
+// Skip this test if not running with the "wiredTiger" storage engine.
+var storageEngine = jsTest.options().storageEngine || "wiredTiger";
+if (storageEngine !== "wiredTiger") {
+ print('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+} else if (jsTest.options().wiredTigerCollectionConfigString === "type=lsm") {
+ // Readers of old data, such as a lagged secondary, can lead to stalls when using
+ // WiredTiger's LSM tree.
+ print("WT-3742: Skipping test because we're running with WiredTiger's LSM tree");
+ return;
+} else {
+ var rst = new ReplSetTest({
+ nodes: 2,
+ // We are going to insert at least 100 MB of data with a long slave
+ // delay. Configure an appropriately large oplog size.
+ oplogSize: 200,
+ });
- var conf = rst.getReplSetConfig();
- conf.members[1].votes = 1;
- conf.members[1].priority = 0;
- conf.members[1].slaveDelay = 24 * 60 * 60;
+ var conf = rst.getReplSetConfig();
+ conf.members[1].votes = 1;
+ conf.members[1].priority = 0;
+ conf.members[1].slaveDelay = 24 * 60 * 60;
- rst.startSet();
- // We cannot wait for a stable recovery timestamp due to the slaveDelay.
- rst.initiateWithAnyNodeAsPrimary(
- conf, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
- var master = rst.getPrimary(); // Waits for PRIMARY state.
+ rst.startSet();
+ // We cannot wait for a stable recovery timestamp due to the slaveDelay.
+ rst.initiateWithAnyNodeAsPrimary(
+ conf, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+ var master = rst.getPrimary(); // Waits for PRIMARY state.
- // Reconfigure primary with a small cache size so less data needs to be
- // inserted to make the cache full while trying to trigger a stall.
- assert.commandWorked(master.adminCommand(
- {setParameter: 1, "wiredTigerEngineRuntimeConfig": "cache_size=100MB"}));
+ // Reconfigure primary with a small cache size so less data needs to be
+ // inserted to make the cache full while trying to trigger a stall.
+ assert.commandWorked(master.adminCommand(
+ {setParameter: 1, "wiredTigerEngineRuntimeConfig": "cache_size=100MB"}));
- var coll = master.getCollection("test.coll");
- var bigstr = "a".repeat(4000);
+ var coll = master.getCollection("test.coll");
+ var bigstr = "a".repeat(4000);
- // Do not insert with a writeConcern because we want the delayed slave
- // to fall behind in replication. This is crucial apart from having a
- // readConcern to pin updates in memory on the primary. To prevent the
- // slave from falling off the oplog, we configure the oplog large enough
- // to accomodate all the inserts.
- for (var i = 0; i < 250; i++) {
- let batch = coll.initializeUnorderedBulkOp();
- for (var j = 0; j < 100; j++) {
- batch.insert({a: bigstr});
- }
- assert.writeOK(batch.execute());
+ // Do not insert with a writeConcern because we want the delayed slave
+ // to fall behind in replication. This is crucial apart from having a
+ // readConcern to pin updates in memory on the primary. To prevent the
+ // slave from falling off the oplog, we configure the oplog large enough
+    // to accommodate all the inserts.
+ for (var i = 0; i < 250; i++) {
+ let batch = coll.initializeUnorderedBulkOp();
+ for (var j = 0; j < 100; j++) {
+ batch.insert({a: bigstr});
}
- rst.stopSet();
+ assert.writeOK(batch.execute());
}
+ rst.stopSet();
+}
})();
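
The delayed secondary in wt_delayed_secondary_read_concern_majority.js is shaped entirely by editing the generated replica set config before initiation; because of the delay, initiation cannot wait for a stable recovery timestamp. The relevant fragment from the test is:

    var conf = rst.getReplSetConfig();
    conf.members[1].priority = 0;               // never electable
    conf.members[1].votes = 1;
    conf.members[1].slaveDelay = 24 * 60 * 60;  // deliberately a day behind
    rst.initiateWithAnyNodeAsPrimary(
        conf, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
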
diff --git a/jstests/noPassthrough/wt_disable_majority_reads.js b/jstests/noPassthrough/wt_disable_majority_reads.js
index 57249723d2c..65cba8a8588 100644
--- a/jstests/noPassthrough/wt_disable_majority_reads.js
+++ b/jstests/noPassthrough/wt_disable_majority_reads.js
@@ -1,32 +1,32 @@
// @tags: [requires_wiredtiger, requires_replication]
(function() {
- "use strict";
+"use strict";
- var rst = new ReplSetTest({
- nodes: [
- {"enableMajorityReadConcern": ""},
- {"enableMajorityReadConcern": "false"},
- {"enableMajorityReadConcern": "true"}
- ]
- });
- rst.startSet();
- rst.initiate();
- rst.awaitSecondaryNodes();
+var rst = new ReplSetTest({
+ nodes: [
+ {"enableMajorityReadConcern": ""},
+ {"enableMajorityReadConcern": "false"},
+ {"enableMajorityReadConcern": "true"}
+ ]
+});
+rst.startSet();
+rst.initiate();
+rst.awaitSecondaryNodes();
- rst.getPrimary().getDB("test").getCollection("test").insert({});
- rst.awaitReplication();
+rst.getPrimary().getDB("test").getCollection("test").insert({});
+rst.awaitReplication();
- // Node 0 is using the default, which is `enableMajorityReadConcern: true`. Thus a majority
- // read should succeed.
- assert.commandWorked(rst.nodes[0].getDB("test").runCommand(
- {"find": "test", "readConcern": {"level": "majority"}}));
- // Node 1 disables majority reads. Check for the appropriate error code.
- assert.commandFailedWithCode(rst.nodes[1].getDB("test").runCommand(
- {"find": "test", "readConcern": {"level": "majority"}}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
- // Same as Node 0.
- assert.commandWorked(rst.nodes[2].getDB("test").runCommand(
- {"find": "test", "readConcern": {"level": "majority"}}));
+// Node 0 is using the default, which is `enableMajorityReadConcern: true`. Thus a majority
+// read should succeed.
+assert.commandWorked(
+ rst.nodes[0].getDB("test").runCommand({"find": "test", "readConcern": {"level": "majority"}}));
+// Node 1 disables majority reads. Check for the appropriate error code.
+assert.commandFailedWithCode(
+ rst.nodes[1].getDB("test").runCommand({"find": "test", "readConcern": {"level": "majority"}}),
+ ErrorCodes.ReadConcernMajorityNotEnabled);
+// Same as Node 0.
+assert.commandWorked(
+ rst.nodes[2].getDB("test").runCommand({"find": "test", "readConcern": {"level": "majority"}}));
- rst.stopSet();
+rst.stopSet();
})();
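
Each node's answer in wt_disable_majority_reads.js depends only on its own enableMajorityReadConcern startup option, so the per-node assertion is a single find at readConcern level "majority" checked against an expected error code. A condensed sketch for one member, assuming node is a connection to that member:

    const res = node.getDB('test').runCommand(
        {find: 'test', readConcern: {level: 'majority'}});
    if (res.ok) {
        // Node started with enableMajorityReadConcern unset or "true".
        assert.commandWorked(res);
    } else {
        // Node started with enableMajorityReadConcern: "false".
        assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
    }
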
diff --git a/jstests/noPassthrough/wt_index_option_defaults.js b/jstests/noPassthrough/wt_index_option_defaults.js
index 46e91a174e6..ce4f1a1c78d 100644
--- a/jstests/noPassthrough/wt_index_option_defaults.js
+++ b/jstests/noPassthrough/wt_index_option_defaults.js
@@ -7,152 +7,151 @@
* inMemoryIndexConfigString.
*/
(function() {
- 'use strict';
-
- var engine = 'wiredTiger';
- if (jsTest.options().storageEngine) {
- engine = jsTest.options().storageEngine;
+'use strict';
+
+var engine = 'wiredTiger';
+if (jsTest.options().storageEngine) {
+ engine = jsTest.options().storageEngine;
+}
+
+// Skip this test if not running with the right storage engine.
+if (engine !== 'wiredTiger' && engine !== 'inMemory') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger" or "inMemory"');
+ return;
+}
+
+// Skip this test when 'xxxIndexConfigString' is already set in TestData.
+// TODO: This test can be enabled when MongoRunner supports combining WT config strings with
+// commas.
+if (jsTest.options()[engine + 'IndexConfigString']) {
+ jsTest.log('Skipping test because system-wide defaults for index options are already set');
+ return;
+}
+
+// Use different values for the same configuration string key to test that index-specific
+// options override collection-wide options, and that collection-wide options override
+// system-wide options.
+var systemWideConfigString = 'split_pct=70,';
+var collectionWideConfigString = 'split_pct=75,';
+var indexSpecificConfigString = 'split_pct=80,';
+
+// Start up a mongod with system-wide defaults for index options and create a collection without
+// any additional options. Tests that an index without any additional options should take on the
+// system-wide defaults, whereas an index with additional options should override the
+// system-wide defaults.
+runTest({});
+
+// Start up a mongod with system-wide defaults for index options and create a collection with
+// additional options. Tests that an index without any additional options should take on the
+// collection-wide defaults, whereas an index with additional options should override the
+// collection-wide defaults.
+runTest({indexOptionDefaults: collectionWideConfigString});
+
+function runTest(collOptions) {
+ var hasIndexOptionDefaults = collOptions.hasOwnProperty('indexOptionDefaults');
+
+ var dbpath = MongoRunner.dataPath + 'wt_index_option_defaults';
+ resetDbpath(dbpath);
+
+ // Start a mongod with system-wide defaults for engine-specific index options.
+ var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ [engine + 'IndexConfigString']: systemWideConfigString,
+ });
+ assert.neq(null, conn, 'mongod was unable to start up');
+
+ var testDB = conn.getDB('test');
+ var cmdObj = {create: 'coll'};
+
+ // Apply collection-wide defaults for engine-specific index options if any were
+ // specified.
+ if (hasIndexOptionDefaults) {
+ cmdObj.indexOptionDefaults = {
+ storageEngine: {[engine]: {configString: collOptions.indexOptionDefaults}}
+ };
}
+ assert.commandWorked(testDB.runCommand(cmdObj));
- // Skip this test if not running with the right storage engine.
- if (engine !== 'wiredTiger' && engine !== 'inMemory') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger" or "inMemory"');
- return;
- }
+ // Create an index that does not specify any engine-specific options.
+ assert.commandWorked(testDB.coll.createIndex({a: 1}, {name: 'without_options'}));
- // Skip this test when 'xxxIndexConfigString' is already set in TestData.
- // TODO: This test can be enabled when MongoRunner supports combining WT config strings with
- // commas.
- if (jsTest.options()[engine + 'IndexConfigString']) {
- jsTest.log('Skipping test because system-wide defaults for index options are already set');
- return;
- }
+ // Create an index that specifies engine-specific index options.
+ assert.commandWorked(testDB.coll.createIndex({b: 1}, {
+ name: 'with_options',
+ storageEngine: {[engine]: {configString: indexSpecificConfigString}}
+ }));
- // Use different values for the same configuration string key to test that index-specific
- // options override collection-wide options, and that collection-wide options override
- // system-wide options.
- var systemWideConfigString = 'split_pct=70,';
- var collectionWideConfigString = 'split_pct=75,';
- var indexSpecificConfigString = 'split_pct=80,';
-
- // Start up a mongod with system-wide defaults for index options and create a collection without
- // any additional options. Tests than an index without any additional options should take on the
- // system-wide defaults, whereas an index with additional options should override the
- // system-wide defaults.
- runTest({});
-
- // Start up a mongod with system-wide defaults for index options and create a collection with
- // additional options. Tests than an index without any additional options should take on the
- // collection-wide defaults, whereas an index with additional options should override the
- // collection-wide defaults.
- runTest({indexOptionDefaults: collectionWideConfigString});
-
- function runTest(collOptions) {
- var hasIndexOptionDefaults = collOptions.hasOwnProperty('indexOptionDefaults');
-
- var dbpath = MongoRunner.dataPath + 'wt_index_option_defaults';
- resetDbpath(dbpath);
-
- // Start a mongod with system-wide defaults for engine-specific index options.
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- [engine + 'IndexConfigString']: systemWideConfigString,
- });
- assert.neq(null, conn, 'mongod was unable to start up');
-
- var testDB = conn.getDB('test');
- var cmdObj = {create: 'coll'};
-
- // Apply collection-wide defaults for engine-specific index options if any were
- // specified.
- if (hasIndexOptionDefaults) {
- cmdObj.indexOptionDefaults = {
- storageEngine: {[engine]: {configString: collOptions.indexOptionDefaults}}
- };
- }
- assert.commandWorked(testDB.runCommand(cmdObj));
-
- // Create an index that does not specify any engine-specific options.
- assert.commandWorked(testDB.coll.createIndex({a: 1}, {name: 'without_options'}));
-
- // Create an index that specifies engine-specific index options.
- assert.commandWorked(testDB.coll.createIndex({b: 1}, {
- name: 'with_options',
- storageEngine: {[engine]: {configString: indexSpecificConfigString}}
- }));
-
- var collStats = testDB.runCommand({collStats: 'coll'});
- assert.commandWorked(collStats);
-
- checkIndexWithoutOptions(collStats.indexDetails);
- checkIndexWithOptions(collStats.indexDetails);
-
- MongoRunner.stopMongod(conn);
-
- function checkIndexWithoutOptions(indexDetails) {
- var indexSpec = getIndexSpecByName(testDB.coll, 'without_options');
- assert(!indexSpec.hasOwnProperty('storageEngine'),
- 'no storage engine options should have been set in the index spec: ' +
- tojson(indexSpec));
-
- var creationString = indexDetails.without_options.creationString;
- if (hasIndexOptionDefaults) {
- assert.eq(-1,
- creationString.indexOf(systemWideConfigString),
- 'system-wide index option present in the creation string even though a ' +
- 'collection-wide option was specified: ' + creationString);
- assert.lte(0,
- creationString.indexOf(collectionWideConfigString),
- 'collection-wide index option not present in the creation string: ' +
- creationString);
- } else {
- assert.lte(0,
- creationString.indexOf(systemWideConfigString),
- 'system-wide index option not present in the creation string: ' +
- creationString);
- assert.eq(-1,
- creationString.indexOf(collectionWideConfigString),
- 'collection-wide index option present in creation string even though ' +
- 'it was not specified: ' + creationString);
- }
+ var collStats = testDB.runCommand({collStats: 'coll'});
+ assert.commandWorked(collStats);
- assert.eq(-1,
- creationString.indexOf(indexSpecificConfigString),
- 'index-specific option present in creation string even though it was not' +
- ' specified: ' + creationString);
- }
+ checkIndexWithoutOptions(collStats.indexDetails);
+ checkIndexWithOptions(collStats.indexDetails);
- function checkIndexWithOptions(indexDetails) {
- var indexSpec = getIndexSpecByName(testDB.coll, 'with_options');
- assert(indexSpec.hasOwnProperty('storageEngine'),
- 'storage engine options should have been set in the index spec: ' +
- tojson(indexSpec));
- assert.docEq({[engine]: {configString: indexSpecificConfigString}},
- indexSpec.storageEngine,
- engine + ' index options not present in the index spec');
+ MongoRunner.stopMongod(conn);
- var creationString = indexDetails.with_options.creationString;
+ function checkIndexWithoutOptions(indexDetails) {
+ var indexSpec = getIndexSpecByName(testDB.coll, 'without_options');
+ assert(!indexSpec.hasOwnProperty('storageEngine'),
+ 'no storage engine options should have been set in the index spec: ' +
+ tojson(indexSpec));
+
+ var creationString = indexDetails.without_options.creationString;
+ if (hasIndexOptionDefaults) {
assert.eq(-1,
creationString.indexOf(systemWideConfigString),
- 'system-wide index option present in the creation string even though an ' +
- 'index-specific option was specified: ' + creationString);
- assert.eq(-1,
- creationString.indexOf(collectionWideConfigString),
- 'system-wide index option present in the creation string even though an ' +
- 'index-specific option was specified: ' + creationString);
+ 'system-wide index option present in the creation string even though a ' +
+ 'collection-wide option was specified: ' + creationString);
+ assert.lte(0,
+ creationString.indexOf(collectionWideConfigString),
+ 'collection-wide index option not present in the creation string: ' +
+ creationString);
+ } else {
assert.lte(
0,
- creationString.indexOf(indexSpecificConfigString),
- 'index-specific option not present in the creation string: ' + creationString);
+ creationString.indexOf(systemWideConfigString),
+ 'system-wide index option not present in the creation string: ' + creationString);
+ assert.eq(-1,
+ creationString.indexOf(collectionWideConfigString),
+ 'collection-wide index option present in creation string even though ' +
+ 'it was not specified: ' + creationString);
}
+
+ assert.eq(-1,
+ creationString.indexOf(indexSpecificConfigString),
+ 'index-specific option present in creation string even though it was not' +
+ ' specified: ' + creationString);
}
- function getIndexSpecByName(coll, indexName) {
- var indexes = coll.getIndexes().filter(function(spec) {
- return spec.name === indexName;
- });
- assert.eq(1, indexes.length, 'index "' + indexName + '" not found');
- return indexes[0];
+ function checkIndexWithOptions(indexDetails) {
+ var indexSpec = getIndexSpecByName(testDB.coll, 'with_options');
+ assert(
+ indexSpec.hasOwnProperty('storageEngine'),
+ 'storage engine options should have been set in the index spec: ' + tojson(indexSpec));
+ assert.docEq({[engine]: {configString: indexSpecificConfigString}},
+ indexSpec.storageEngine,
+ engine + ' index options not present in the index spec');
+
+ var creationString = indexDetails.with_options.creationString;
+ assert.eq(-1,
+ creationString.indexOf(systemWideConfigString),
+ 'system-wide index option present in the creation string even though an ' +
+ 'index-specific option was specified: ' + creationString);
+ assert.eq(-1,
+ creationString.indexOf(collectionWideConfigString),
+                  'collection-wide index option present in the creation string even though an ' +
+ 'index-specific option was specified: ' + creationString);
+ assert.lte(0,
+ creationString.indexOf(indexSpecificConfigString),
+ 'index-specific option not present in the creation string: ' + creationString);
}
+}
+
+function getIndexSpecByName(coll, indexName) {
+ var indexes = coll.getIndexes().filter(function(spec) {
+ return spec.name === indexName;
+ });
+ assert.eq(1, indexes.length, 'index "' + indexName + '" not found');
+ return indexes[0];
+}
})();
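
wt_index_option_defaults.js checks a three-level precedence: an index-specific configString beats the collection's indexOptionDefaults, which in turn beats the engine-wide IndexConfigString supplied at startup. With the split_pct values used above, the three layers look roughly like this for WiredTiger (the first layer is the option passed to MongoRunner.runMongod in the test):

    // 1. System-wide default, passed at startup:
    //      MongoRunner.runMongod({wiredTigerIndexConfigString: 'split_pct=70,'})
    // 2. Collection-wide default, supplied at create time:
    assert.commandWorked(db.createCollection('coll', {
        indexOptionDefaults: {storageEngine: {wiredTiger: {configString: 'split_pct=75,'}}}
    }));
    // 3. Index-specific option, which overrides both of the above:
    assert.commandWorked(db.coll.createIndex({b: 1}, {
        name: 'with_options',
        storageEngine: {wiredTiger: {configString: 'split_pct=80,'}}
    }));
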
diff --git a/jstests/noPassthrough/wt_malformed_creation_string.js b/jstests/noPassthrough/wt_malformed_creation_string.js
index 4067cca329f..e6ba7d08e31 100644
--- a/jstests/noPassthrough/wt_malformed_creation_string.js
+++ b/jstests/noPassthrough/wt_malformed_creation_string.js
@@ -2,59 +2,59 @@
* Tests that a null embedded malformed string is rejected gracefully.
*/
(function() {
- 'use strict';
-
- var engine = 'wiredTiger';
- if (jsTest.options().storageEngine) {
- engine = jsTest.options().storageEngine;
+'use strict';
+
+var engine = 'wiredTiger';
+if (jsTest.options().storageEngine) {
+ engine = jsTest.options().storageEngine;
+}
+
+// Skip this test if not running with the right storage engine.
+if (engine !== 'wiredTiger' && engine !== 'inMemory') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger" or "inMemory"');
+ return;
+}
+
+// Build an array of malformed strings to test
+var malformedStrings = ["\u0000000", "\0,", "bl\0ah", "split_pct=30,\0split_pct=35,"];
+
+// Start up a mongod.
+// Test that collection and index creation with malformed creation strings fail gracefully.
+runTest();
+
+function runTest() {
+ var dbpath = MongoRunner.dataPath + 'wt_malformed_creation_string';
+ resetDbpath(dbpath);
+
+ // Start a mongod
+ var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ });
+ assert.neq(null, conn, 'mongod was unable to start up');
+
+ var testDB = conn.getDB('test');
+
+ // Collection creation with malformed string should fail
+ for (var i = 0; i < malformedStrings.length; i++) {
+ assert.commandFailedWithCode(
+ testDB.createCollection(
+ 'coll', {storageEngine: {[engine]: {configString: malformedStrings[i]}}}),
+ ErrorCodes.FailedToParse);
}
- // Skip this test if not running with the right storage engine.
- if (engine !== 'wiredTiger' && engine !== 'inMemory') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger" or "inMemory"');
- return;
- }
+ // Create collection to test index creation on
+ assert.commandWorked(testDB.createCollection('coll'));
- // Build an array of malformed strings to test
- var malformedStrings = ["\u0000000", "\0,", "bl\0ah", "split_pct=30,\0split_pct=35,"];
-
- // Start up a mongod.
- // Test that collection and index creation with malformed creation strings fail gracefully.
- runTest();
-
- function runTest() {
- var dbpath = MongoRunner.dataPath + 'wt_malformed_creation_string';
- resetDbpath(dbpath);
-
- // Start a mongod
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- });
- assert.neq(null, conn, 'mongod was unable to start up');
-
- var testDB = conn.getDB('test');
-
- // Collection creation with malformed string should fail
- for (var i = 0; i < malformedStrings.length; i++) {
- assert.commandFailedWithCode(
- testDB.createCollection(
- 'coll', {storageEngine: {[engine]: {configString: malformedStrings[i]}}}),
- ErrorCodes.FailedToParse);
- }
-
- // Create collection to test index creation on
- assert.commandWorked(testDB.createCollection('coll'));
-
- // Index creation with malformed string should fail
- for (var i = 0; i < malformedStrings.length; i++) {
- assert.commandFailedWithCode(testDB.coll.createIndex({a: 1}, {
- name: 'with_malformed_str',
- storageEngine: {[engine]: {configString: malformedStrings[i]}}
- }),
- ErrorCodes.FailedToParse);
- }
-
- MongoRunner.stopMongod(conn);
+ // Index creation with malformed string should fail
+ for (var i = 0; i < malformedStrings.length; i++) {
+ assert.commandFailedWithCode(testDB.coll.createIndex({a: 1}, {
+ name: 'with_malformed_str',
+ storageEngine: {[engine]: {configString: malformedStrings[i]}}
+ }),
+ ErrorCodes.FailedToParse);
}
+
+ MongoRunner.stopMongod(conn);
+}
})();
diff --git a/jstests/noPassthrough/wt_nojournal_skip_recovery.js b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
index 97988f84d83..df813e080d3 100644
--- a/jstests/noPassthrough/wt_nojournal_skip_recovery.js
+++ b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
@@ -4,95 +4,95 @@
* Also verifies that deleting the journal/ directory allows those operations to safely be ignored.
*/
(function() {
- 'use strict';
+'use strict';
- // Skip this test if not running with the "wiredTiger" storage engine.
- if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
- }
+// Skip this test if not running with the "wiredTiger" storage engine.
+if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+}
- // Skip this test until we figure out why journaled writes are replayed after last checkpoint.
- TestData.skipCollectionAndIndexValidation = true;
+// Skip this test until we figure out why journaled writes are replayed after last checkpoint.
+TestData.skipCollectionAndIndexValidation = true;
- var dbpath = MongoRunner.dataPath + 'wt_nojournal_skip_recovery';
- resetDbpath(dbpath);
+var dbpath = MongoRunner.dataPath + 'wt_nojournal_skip_recovery';
+resetDbpath(dbpath);
- // Start a mongod with journaling enabled.
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- journal: '',
- // Wait an hour between checkpoints to ensure one isn't created after the fsync command is
- // executed and before the mongod is terminated. This is necessary to ensure that exactly 90
- // documents with the 'journaled' field exist in the collection.
- wiredTigerEngineConfigString: 'checkpoint=(wait=3600)'
- });
- assert.neq(null, conn, 'mongod was unable to start up');
+// Start a mongod with journaling enabled.
+var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ journal: '',
+ // Wait an hour between checkpoints to ensure one isn't created after the fsync command is
+ // executed and before the mongod is terminated. This is necessary to ensure that exactly 90
+ // documents with the 'journaled' field exist in the collection.
+ wiredTigerEngineConfigString: 'checkpoint=(wait=3600)'
+});
+assert.neq(null, conn, 'mongod was unable to start up');
- // Execute unjournaled inserts, but periodically do a journaled insert. Triggers a checkpoint
- // prior to the mongod being terminated.
- var awaitShell = startParallelShell(function() {
- for (let loopNum = 1; true; ++loopNum) {
- var bulk = db.nojournal.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; ++i) {
- bulk.insert({unjournaled: i});
- }
- assert.writeOK(bulk.execute({j: false}));
- assert.writeOK(db.nojournal.insert({journaled: loopNum}, {writeConcern: {j: true}}));
+// Execute unjournaled inserts, but periodically do a journaled insert. Triggers a checkpoint
+// prior to the mongod being terminated.
+var awaitShell = startParallelShell(function() {
+ for (let loopNum = 1; true; ++loopNum) {
+ var bulk = db.nojournal.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; ++i) {
+ bulk.insert({unjournaled: i});
+ }
+ assert.writeOK(bulk.execute({j: false}));
+ assert.writeOK(db.nojournal.insert({journaled: loopNum}, {writeConcern: {j: true}}));
- // Create a checkpoint slightly before the mongod is terminated.
- if (loopNum === 90) {
- assert.commandWorked(db.adminCommand({fsync: 1}));
- }
+ // Create a checkpoint slightly before the mongod is terminated.
+ if (loopNum === 90) {
+ assert.commandWorked(db.adminCommand({fsync: 1}));
}
- }, conn.port);
+ }
+}, conn.port);
- // After some journaled write operations have been performed against the mongod, send a SIGKILL
- // to the process to trigger an unclean shutdown.
- assert.soon(
- function() {
- var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
- if (count >= 100) {
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- return true;
- }
- return false;
- },
- 'the parallel shell did not perform at least 100 journaled inserts',
- 5 * 60 * 1000 /*timeout ms*/);
+// After some journaled write operations have been performed against the mongod, send a SIGKILL
+// to the process to trigger an unclean shutdown.
+assert.soon(
+ function() {
+ var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
+ if (count >= 100) {
+ MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+ return true;
+ }
+ return false;
+ },
+ 'the parallel shell did not perform at least 100 journaled inserts',
+ 5 * 60 * 1000 /*timeout ms*/);
- var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode, 'expected shell to exit abnormally due to mongod being terminated');
+var exitCode = awaitShell({checkExitSuccess: false});
+assert.neq(0, exitCode, 'expected shell to exit abnormally due to mongod being terminated');
- // Restart the mongod with journaling disabled, but configure it to error if the database needs
- // recovery.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- nojournal: '',
- wiredTigerEngineConfigString: 'log=(recover=error)',
- });
- assert.eq(null, conn, 'mongod should not have started up because it requires recovery');
+// Restart the mongod with journaling disabled, but configure it to error if the database needs
+// recovery.
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ nojournal: '',
+ wiredTigerEngineConfigString: 'log=(recover=error)',
+});
+assert.eq(null, conn, 'mongod should not have started up because it requires recovery');
- // Remove the journal files.
- assert(removeFile(dbpath + '/journal'), 'failed to remove the journal directory');
+// Remove the journal files.
+assert(removeFile(dbpath + '/journal'), 'failed to remove the journal directory');
- // Restart the mongod with journaling disabled again.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- nojournal: '',
- wiredTigerEngineConfigString: 'log=(recover=error)',
- });
- assert.neq(null, conn, 'mongod was unable to start up after removing the journal directory');
+// Restart the mongod with journaling disabled again.
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ nojournal: '',
+ wiredTigerEngineConfigString: 'log=(recover=error)',
+});
+assert.neq(null, conn, 'mongod was unable to start up after removing the journal directory');
- var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
- assert.lte(90, count, 'missing documents that were present in the last checkpoint');
- assert.gte(90,
- count,
- 'journaled write operations since the last checkpoint should not have been' +
- ' replayed');
+var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
+assert.lte(90, count, 'missing documents that were present in the last checkpoint');
+assert.gte(90,
+ count,
+ 'journaled write operations since the last checkpoint should not have been' +
+ ' replayed');
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
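
Durability in wt_nojournal_skip_recovery.js is controlled entirely through the write concern's j field: bulk inserts run with {j: false} and never force a journal flush, while the periodic marker insert uses {writeConcern: {j: true}} so it reaches the journal before it is acknowledged. The two shapes the parallel shell mixes, side by side:

    var bulk = db.nojournal.initializeUnorderedBulkOp();
    bulk.insert({unjournaled: 0});
    assert.writeOK(bulk.execute({j: false}));   // acknowledged, not necessarily journaled
    assert.writeOK(db.nojournal.insert(
        {journaled: 1}, {writeConcern: {j: true}}));  // journaled before acknowledgement
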
diff --git a/jstests/noPassthrough/wt_nojournal_toggle.js b/jstests/noPassthrough/wt_nojournal_toggle.js
index f09f92f6a95..50d5483aa26 100644
--- a/jstests/noPassthrough/wt_nojournal_toggle.js
+++ b/jstests/noPassthrough/wt_nojournal_toggle.js
@@ -3,121 +3,121 @@
* when the mongod is killed and restarted with --nojournal.
*/
(function() {
- 'use strict';
-
- // Skip this test if not running with the "wiredTiger" storage engine.
- if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
- }
-
- // Returns a function that primarily executes unjournaled inserts, but periodically does a
- // journaled insert. If 'checkpoint' is true, then the fsync command is run to create a
- // checkpoint prior to the mongod being terminated.
- function insertFunctionFactory(checkpoint) {
- var insertFunction = function() {
- for (var iter = 0; iter < 1000; ++iter) {
- var bulk = db.nojournal.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; ++i) {
- bulk.insert({unjournaled: i});
- }
- assert.writeOK(bulk.execute({j: false}));
- assert.writeOK(db.nojournal.insert({journaled: iter}, {writeConcern: {j: true}}));
- if (__checkpoint_template_placeholder__ && iter === 50) {
- assert.commandWorked(db.adminCommand({fsync: 1}));
- }
+'use strict';
+
+// Skip this test if not running with the "wiredTiger" storage engine.
+if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+}
+
+// Returns a function that primarily executes unjournaled inserts, but periodically does a
+// journaled insert. If 'checkpoint' is true, then the fsync command is run to create a
+// checkpoint prior to the mongod being terminated.
+function insertFunctionFactory(checkpoint) {
+ var insertFunction = function() {
+ for (var iter = 0; iter < 1000; ++iter) {
+ var bulk = db.nojournal.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; ++i) {
+ bulk.insert({unjournaled: i});
}
- };
-
- return '(' +
- insertFunction.toString().replace('__checkpoint_template_placeholder__',
- checkpoint.toString()) +
- ')();';
- }
-
- function runTest(options) {
- var dbpath = MongoRunner.dataPath + 'wt_nojournal_toggle';
- resetDbpath(dbpath);
-
- // Start a mongod with journaling enabled.
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- journal: '',
- });
- assert.neq(null, conn, 'mongod was unable to start up');
-
- // Run a mixture of journaled and unjournaled write operations against the mongod.
- var awaitShell = startParallelShell(insertFunctionFactory(options.checkpoint), conn.port);
-
- // After some journaled write operations have been performed against the mongod, send a
- // SIGKILL to the process to trigger an unclean shutdown.
- assert.soon(function() {
- var testDB = conn.getDB('test');
- var count = testDB.nojournal.count({journaled: {$exists: true}});
- if (count >= 100) {
- // We saw 100 journaled inserts, but visibility does not guarantee durability, so
- // do an extra journaled write to make all visible commits durable, before killing
- // the mongod.
- assert.writeOK(testDB.nojournal.insert({final: true}, {writeConcern: {j: true}}));
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- return true;
+ assert.writeOK(bulk.execute({j: false}));
+ assert.writeOK(db.nojournal.insert({journaled: iter}, {writeConcern: {j: true}}));
+ if (__checkpoint_template_placeholder__ && iter === 50) {
+ assert.commandWorked(db.adminCommand({fsync: 1}));
}
- return false;
- }, 'the parallel shell did not perform at least 100 journaled inserts');
-
- var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode, 'expected shell to exit abnormally due to mongod being terminated');
-
- // Restart the mongod with journaling disabled.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- nojournal: '',
- });
- assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
-
+ }
+ };
+
+ return '(' +
+ insertFunction.toString().replace('__checkpoint_template_placeholder__',
+ checkpoint.toString()) +
+ ')();';
+}
+
+function runTest(options) {
+ var dbpath = MongoRunner.dataPath + 'wt_nojournal_toggle';
+ resetDbpath(dbpath);
+
+ // Start a mongod with journaling enabled.
+ var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ journal: '',
+ });
+ assert.neq(null, conn, 'mongod was unable to start up');
+
+ // Run a mixture of journaled and unjournaled write operations against the mongod.
+ var awaitShell = startParallelShell(insertFunctionFactory(options.checkpoint), conn.port);
+
+ // After some journaled write operations have been performed against the mongod, send a
+ // SIGKILL to the process to trigger an unclean shutdown.
+ assert.soon(function() {
var testDB = conn.getDB('test');
- assert.eq(1, testDB.nojournal.count({final: true}), 'final journaled write was not found');
- assert.lte(100,
- testDB.nojournal.count({journaled: {$exists: true}}),
- 'journaled write operations since the last checkpoint were not replayed');
-
- var initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
- assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
- assert.eq(initialNumLogWrites,
- testDB.serverStatus().wiredTiger.log['log write operations'],
- 'journaling is still enabled even though --nojournal was specified');
-
- MongoRunner.stopMongod(conn);
-
- // Restart the mongod with journaling enabled.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- journal: '',
- });
- assert.neq(null, conn, 'mongod was unable to start up after re-enabling journaling');
-
- // Change the database object to connect to the restarted mongod.
- testDB = conn.getDB('test');
- initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
-
- assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
- assert.lt(initialNumLogWrites,
- testDB.serverStatus().wiredTiger.log['log write operations'],
- 'journaling is still disabled even though --journal was specified');
-
- MongoRunner.stopMongod(conn);
- }
-
- // Operations from the journal should be replayed even when the mongod is terminated before
- // anything is written to disk.
- jsTest.log('Running the test without ever creating a checkpoint');
- runTest({checkpoint: false});
-
- // Repeat the test again, but ensure that some data is written to disk before the mongod is
- // terminated.
- jsTest.log('Creating a checkpoint part-way through running the test');
- runTest({checkpoint: true});
+ var count = testDB.nojournal.count({journaled: {$exists: true}});
+ if (count >= 100) {
+ // We saw 100 journaled inserts, but visibility does not guarantee durability, so
+ // do an extra journaled write to make all visible commits durable, before killing
+ // the mongod.
+ assert.writeOK(testDB.nojournal.insert({final: true}, {writeConcern: {j: true}}));
+ MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+ return true;
+ }
+ return false;
+ }, 'the parallel shell did not perform at least 100 journaled inserts');
+
+ var exitCode = awaitShell({checkExitSuccess: false});
+ assert.neq(0, exitCode, 'expected shell to exit abnormally due to mongod being terminated');
+
+ // Restart the mongod with journaling disabled.
+ conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ nojournal: '',
+ });
+ assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
+
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.nojournal.count({final: true}), 'final journaled write was not found');
+ assert.lte(100,
+ testDB.nojournal.count({journaled: {$exists: true}}),
+ 'journaled write operations since the last checkpoint were not replayed');
+
+ var initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
+ assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
+ assert.eq(initialNumLogWrites,
+ testDB.serverStatus().wiredTiger.log['log write operations'],
+ 'journaling is still enabled even though --nojournal was specified');
+
+ MongoRunner.stopMongod(conn);
+
+ // Restart the mongod with journaling enabled.
+ conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ journal: '',
+ });
+ assert.neq(null, conn, 'mongod was unable to start up after re-enabling journaling');
+
+ // Change the database object to connect to the restarted mongod.
+ testDB = conn.getDB('test');
+ initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
+
+ assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
+ assert.lt(initialNumLogWrites,
+ testDB.serverStatus().wiredTiger.log['log write operations'],
+ 'journaling is still disabled even though --journal was specified');
+
+ MongoRunner.stopMongod(conn);
+}
+
+// Operations from the journal should be replayed even when the mongod is terminated before
+// anything is written to disk.
+jsTest.log('Running the test without ever creating a checkpoint');
+runTest({checkpoint: false});
+
+// Repeat the test again, but ensure that some data is written to disk before the mongod is
+// terminated.
+jsTest.log('Creating a checkpoint part-way through running the test');
+runTest({checkpoint: true});
})();
diff --git a/jstests/noPassthrough/wt_operation_stats.js b/jstests/noPassthrough/wt_operation_stats.js
index b9c84e356f3..e273dd34170 100644
--- a/jstests/noPassthrough/wt_operation_stats.js
+++ b/jstests/noPassthrough/wt_operation_stats.js
@@ -7,83 +7,83 @@
// @tags: [requires_profiling]
(function() {
- 'use strict';
-
- load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
-
- const readStatRegx = /storage:{ data: { bytesRead: ([0-9]+)/;
-
- let checkLogStats = function() {
- // Check if the log output contains the expected statistics.
- let mongodLogs = rawMongoProgramOutput();
- let lines = mongodLogs.split('\n');
- let match;
- let logLineCount = 0;
- for (let line of lines) {
- if ((match = readStatRegx.exec(line)) !== null) {
- jsTestLog(line);
- logLineCount++;
- }
- }
- assert.gte(logLineCount, 1);
- };
-
- let checkSystemProfileStats = function(profileObj, statName) {
- // Check that the profiled operation contains the expected statistics.
- assert(profileObj.hasOwnProperty("storage"), tojson(profileObj));
- assert(profileObj.storage.hasOwnProperty("data"), tojson(profileObj));
- assert(profileObj.storage.data.hasOwnProperty(statName), tojson(profileObj));
- };
-
- // This test can only be run if the storageEngine is wiredTiger
- if (jsTest.options().storageEngine && (jsTest.options().storageEngine !== "wiredTiger")) {
- jsTestLog("Skipping test because storageEngine is not wiredTiger");
- } else {
- let name = "wt_op_stat";
-
- jsTestLog("run mongod");
- let conn = MongoRunner.runMongod();
- assert.neq(null, conn, "mongod was unable to start up");
- let testDB = conn.getDB(name);
-
- // Insert 200 documents of size 1K each, spanning multiple pages in the btree.
- let value = 'a'.repeat(1024);
-
- jsTestLog("insert data");
- for (let i = 0; i < 200; i++) {
- assert.writeOK(testDB.foo.insert({x: value}));
+'use strict';
+
+load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+
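+// Matches slow operation log lines that contain WiredTiger storage statistics, capturing the
+// bytesRead value.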
+const readStatRegx = /storage:{ data: { bytesRead: ([0-9]+)/;
+
+let checkLogStats = function() {
+ // Check if the log output contains the expected statistics.
+ let mongodLogs = rawMongoProgramOutput();
+ let lines = mongodLogs.split('\n');
+ let match;
+ let logLineCount = 0;
+ for (let line of lines) {
+ if ((match = readStatRegx.exec(line)) !== null) {
+ jsTestLog(line);
+ logLineCount++;
}
+ }
+ assert.gte(logLineCount, 1);
+};
+
+let checkSystemProfileStats = function(profileObj, statName) {
+ // Check that the profiled operation contains the expected statistics.
+ assert(profileObj.hasOwnProperty("storage"), tojson(profileObj));
+ assert(profileObj.storage.hasOwnProperty("data"), tojson(profileObj));
+ assert(profileObj.storage.data.hasOwnProperty(statName), tojson(profileObj));
+};
+
+// This test can only be run if the storageEngine is wiredTiger
+if (jsTest.options().storageEngine && (jsTest.options().storageEngine !== "wiredTiger")) {
+ jsTestLog("Skipping test because storageEngine is not wiredTiger");
+} else {
+ let name = "wt_op_stat";
+
+ jsTestLog("run mongod");
+ let conn = MongoRunner.runMongod();
+ assert.neq(null, conn, "mongod was unable to start up");
+ let testDB = conn.getDB(name);
+
+ // Insert 200 documents of size 1K each, spanning multiple pages in the btree.
+ let value = 'a'.repeat(1024);
+
+ jsTestLog("insert data");
+ for (let i = 0; i < 200; i++) {
+ assert.writeOK(testDB.foo.insert({x: value}));
+ }
- let connport = conn.port;
- MongoRunner.stopMongod(conn);
-
- // Restart the server
- conn = MongoRunner.runMongod({
- restart: true,
- port: connport,
- slowms: "0",
- });
-
- clearRawMongoProgramOutput();
-
- // Scan the collection and check the bytes read statistic in the slowop log and
- // system.profile.
- testDB = conn.getDB(name);
- testDB.setProfilingLevel(2);
- jsTestLog("read data");
- let cur = testDB.foo.find();
- while (cur.hasNext()) {
- cur.next();
- }
+ let connport = conn.port;
+ MongoRunner.stopMongod(conn);
+
+ // Restart the server so that the subsequent collection scan has to read from disk. slowms is
+ // set to 0 so that every operation is logged as a slow op and its storage stats reach the log.
+ conn = MongoRunner.runMongod({
+ restart: true,
+ port: connport,
+ slowms: "0",
+ });
+
+ clearRawMongoProgramOutput();
+
+ // Scan the collection and check the bytes read statistic in the slowop log and
+ // system.profile.
+ testDB = conn.getDB(name);
+ testDB.setProfilingLevel(2);
+ jsTestLog("read data");
+ let cur = testDB.foo.find();
+ while (cur.hasNext()) {
+ cur.next();
+ }
- // Look for the storage statistics in the profiled output of the find command.
- let profileObj = getLatestProfilerEntry(testDB, {op: "query", ns: "wt_op_stat.foo"});
- checkSystemProfileStats(profileObj, "bytesRead");
+ // Look for the storage statistics in the profiled output of the find command.
+ let profileObj = getLatestProfilerEntry(testDB, {op: "query", ns: "wt_op_stat.foo"});
+ checkSystemProfileStats(profileObj, "bytesRead");
- // Stopping the mongod waits until all of its logs have been read by the mongo shell.
- MongoRunner.stopMongod(conn);
- checkLogStats();
+ // Stopping the mongod waits until all of its logs have been read by the mongo shell.
+ MongoRunner.stopMongod(conn);
+ checkLogStats();
- jsTestLog("Success!");
- }
+ jsTestLog("Success!");
+}
})();
diff --git a/jstests/noPassthrough/wt_prepare_conflict.js b/jstests/noPassthrough/wt_prepare_conflict.js
index 67fffd10210..c562c0eab2f 100644
--- a/jstests/noPassthrough/wt_prepare_conflict.js
+++ b/jstests/noPassthrough/wt_prepare_conflict.js
@@ -4,61 +4,49 @@
* @tag: [requires_wiredtiger]
*/
(function() {
- "strict";
-
- let conn = MongoRunner.runMongod();
- let testDB = conn.getDB("test");
-
- let t = testDB.prepare_conflict;
- t.drop();
-
- // Test different types of operations: removals, updates, and index operations.
- assert.commandWorked(t.createIndex({x: 1}));
- assert.commandWorked(
- t.createIndex({y: 1}, {partialFilterExpression: {_id: {$gte: 500}}, unique: true}));
- let rand = {"#RAND_INT": [0, 1000]};
- let ops = [
- {op: "remove", ns: t.getFullName(), query: {_id: rand}},
- {op: "findOne", ns: t.getFullName(), query: {_id: rand}},
- {
- op: "update",
- ns: t.getFullName(),
- query: {_id: rand},
- update: {$inc: {x: 1}},
- upsert: true
- },
- {op: "findOne", ns: t.getFullName(), query: {x: rand}},
- {
- op: "update",
- ns: t.getFullName(),
- query: {_id: rand},
- update: {$inc: {y: 1}},
- upsert: true
- },
- {op: "findOne", ns: t.getFullName(), query: {y: rand}},
- {op: "findOne", ns: t.getFullName(), query: {_id: rand}},
- ];
-
- let seconds = 5;
- let parallel = 5;
- let host = testDB.getMongo().host;
-
- let benchArgs = {ops, seconds, parallel, host};
-
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'WTAlwaysNotifyPrepareConflictWaiters', mode: 'alwaysOn'}));
-
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'WTPrepareConflictForReads', mode: {activationProbability: 0.05}}));
-
- res = benchRun(benchArgs);
-
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: 'WTPrepareConflictForReads', mode: "off"}));
-
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'WTAlwaysNotifyPrepareConflictWaiters', mode: 'off'}));
- res = t.validate();
- assert(res.valid, tojson(res));
- MongoRunner.stopMongod(conn);
+"strict";
+
+let conn = MongoRunner.runMongod();
+let testDB = conn.getDB("test");
+
+let t = testDB.prepare_conflict;
+t.drop();
+
+// Test different types of operations: removals, updates, and index operations.
+assert.commandWorked(t.createIndex({x: 1}));
+assert.commandWorked(
+ t.createIndex({y: 1}, {partialFilterExpression: {_id: {$gte: 500}}, unique: true}));
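+// benchRun's "#RAND_INT" template substitutes a random integer between 0 and 1000 for each op.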
+let rand = {"#RAND_INT": [0, 1000]};
+let ops = [
+ {op: "remove", ns: t.getFullName(), query: {_id: rand}},
+ {op: "findOne", ns: t.getFullName(), query: {_id: rand}},
+ {op: "update", ns: t.getFullName(), query: {_id: rand}, update: {$inc: {x: 1}}, upsert: true},
+ {op: "findOne", ns: t.getFullName(), query: {x: rand}},
+ {op: "update", ns: t.getFullName(), query: {_id: rand}, update: {$inc: {y: 1}}, upsert: true},
+ {op: "findOne", ns: t.getFullName(), query: {y: rand}},
+ {op: "findOne", ns: t.getFullName(), query: {_id: rand}},
+];
+
+let seconds = 5;
+let parallel = 5;
+let host = testDB.getMongo().host;
+
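+// benchRun drives these ops concurrently from 'parallel' client threads for 'seconds' seconds
+// against 'host'.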
+let benchArgs = {ops, seconds, parallel, host};
+
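+// Judging by its name, this failpoint makes WiredTiger notify prepare-conflict waiters eagerly,
+// so operations that hit a prepare conflict during the workload are woken promptly.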
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'WTAlwaysNotifyPrepareConflictWaiters', mode: 'alwaysOn'}));
+
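+// With activationProbability 0.05, this failpoint triggers a prepare conflict on roughly 5% of
+// reads, exercising the prepare-conflict retry path.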
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'WTPrepareConflictForReads', mode: {activationProbability: 0.05}}));
+
+let res = benchRun(benchArgs);
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'WTPrepareConflictForReads', mode: "off"}));
+
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: 'WTAlwaysNotifyPrepareConflictWaiters', mode: 'off'}));
+res = t.validate();
+assert(res.valid, tojson(res));
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/wt_skip_prepare_conflicts_retries_failpoint.js b/jstests/noPassthrough/wt_skip_prepare_conflicts_retries_failpoint.js
index 02d20790a40..c93eb10e415 100644
--- a/jstests/noPassthrough/wt_skip_prepare_conflicts_retries_failpoint.js
+++ b/jstests/noPassthrough/wt_skip_prepare_conflicts_retries_failpoint.js
@@ -6,64 +6,63 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const testDB = primary.getDB("test");
- const testColl = testDB.getCollection("wt_skip_prepare_conflict_retries_failpoint");
+const primary = rst.getPrimary();
+const testDB = primary.getDB("test");
+const testColl = testDB.getCollection("wt_skip_prepare_conflict_retries_failpoint");
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(testDB.getName());
- const sessionColl = sessionDB.getCollection(testColl.getName());
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(testDB.getName());
+const sessionColl = sessionDB.getCollection(testColl.getName());
- assert.commandWorked(testDB.runCommand({profile: 2}));
+assert.commandWorked(testDB.runCommand({profile: 2}));
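+// With profiling level 2 every operation is profiled, so the conflicting update attempted below
+// will be recorded in system.profile.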
- assert.commandWorked(
- testColl.insert({_id: 1, note: "from before transaction"}, {w: "majority"}));
+assert.commandWorked(testColl.insert({_id: 1, note: "from before transaction"}, {w: "majority"}));
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: "WTSkipPrepareConflictRetries", mode: "alwaysOn"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "WTSkipPrepareConflictRetries", mode: "alwaysOn"}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "alwaysOn"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "alwaysOn"}));
- // A non-transactional operation conflicting with a write operation performed inside a
- // multistatement transaction can encounter a WT_PREPARE_CONFLICT in the wiredtiger
- // layer under several circumstances, such as performing an insert, update, or find
- // on a document that is in a prepare statement. The non-transactional operation
- // would then be retried after the prepared transaction commits or aborts. However, with the
- // "WTSkipPrepareConflictRetries"failpoint enabled, the non-transactional operation would
- // instead return with a WT_ROLLBACK error. This would then get bubbled up as a
- // WriteConflictException. Enabling the "skipWriteConflictRetries" failpoint then prevents
- // the higher layers from retrying the entire operation.
- session.startTransaction();
+// A non-transactional operation that conflicts with a write performed inside a
+// multi-statement transaction can encounter a WT_PREPARE_CONFLICT in the WiredTiger
+// layer under several circumstances, such as performing an insert, update, or find
+// on a document that is part of a prepared transaction. The non-transactional operation
+// would then be retried after the prepared transaction commits or aborts. However, with the
+// "WTSkipPrepareConflictRetries" failpoint enabled, the non-transactional operation instead
+// returns a WT_ROLLBACK error, which is bubbled up as a WriteConflictException. Enabling the
+// "skipWriteConflictRetries" failpoint then prevents the higher layers from retrying the
+// entire operation.
+session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {$set: {note: "from prepared transaction"}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$set: {note: "from prepared transaction"}}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandFailedWithCode(
- testColl.update({_id: 1}, {$set: {note: "outside prepared transaction"}}),
- ErrorCodes.WriteConflict);
+assert.commandFailedWithCode(
+ testColl.update({_id: 1}, {$set: {note: "outside prepared transaction"}}),
+ ErrorCodes.WriteConflict);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- const profileEntry =
- testDB.system.profile.findOne({"command.u.$set.note": "outside prepared transaction"});
- assert.gte(profileEntry.prepareReadConflicts, 1);
+const profileEntry =
+ testDB.system.profile.findOne({"command.u.$set.note": "outside prepared transaction"});
+assert.gte(profileEntry.prepareReadConflicts, 1);
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "WTSkipPrepareConflictRetries", mode: "off"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "WTSkipPrepareConflictRetries", mode: "off"}));
- assert.commandWorked(
- testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "off"}));
+assert.commandWorked(
+ testDB.adminCommand({configureFailPoint: "skipWriteConflictRetries", mode: "off"}));
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
})();
diff --git a/jstests/noPassthrough/wt_unclean_shutdown.js b/jstests/noPassthrough/wt_unclean_shutdown.js
index 74dceda2a5c..6b11d8004c9 100644
--- a/jstests/noPassthrough/wt_unclean_shutdown.js
+++ b/jstests/noPassthrough/wt_unclean_shutdown.js
@@ -10,122 +10,122 @@
load('jstests/libs/parallelTester.js'); // For ScopedThread
(function() {
- 'use strict';
-
- // Skip this test if not running with the "wiredTiger" storage engine.
- if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
- }
+'use strict';
+
+// Skip this test if not running with the "wiredTiger" storage engine.
+if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
+ jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
+ return;
+}
+
+var dbpath = MongoRunner.dataPath + 'wt_unclean_shutdown';
+resetDbpath(dbpath);
+
+var conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ // Modify some WT settings:
+ // - Disable checkpoints based on log size so that we know no checkpoint gets written.
+ // - Explicitly set checkpoints to 60 seconds in case the default ever changes.
+ // - Turn off archiving and compression for easier debugging if there is a failure.
+ // - Make the maximum file size small to encourage lots of file changes. WT-2706 was
+ // related to log file switches.
+ wiredTigerEngineConfigString:
+ 'checkpoint=(wait=60,log_size=0),log=(archive=false,compressor=none,file_max=10M)'
+});
+assert.neq(null, conn, 'mongod was unable to start up');
+
+var insertWorkload = function(host, start, end) {
+ var conn = new Mongo(host);
+ var testDB = conn.getDB('test');
+
+ // Create a record larger than 128K, the threshold above which WiredTiger does an unbuffered
+ // log write.
+ var largeString = 'a'.repeat(1024 * 128);
+
+ for (var i = start; i < end; i++) {
+ var doc = {_id: i, x: 0};
+ // One of the bugs, WT-2696, was related to large records that used the unbuffered
+ // log code. Periodically insert the large record to stress that code path.
+ if (i % 30 === 0) {
+ doc.x = largeString;
+ }
- var dbpath = MongoRunner.dataPath + 'wt_unclean_shutdown';
- resetDbpath(dbpath);
-
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- // Modify some WT settings:
- // - Disable checkpoints based on log size so that we know no checkpoint gets written.
- // - Explicitly set checkpoints to 60 seconds in case the default ever changes.
- // - Turn off archiving and compression for easier debugging if there is a failure.
- // - Make the maximum file size small to encourage lots of file changes. WT-2706 was
- // related to log file switches.
- wiredTigerEngineConfigString:
- 'checkpoint=(wait=60,log_size=0),log=(archive=false,compressor=none,file_max=10M)'
- });
- assert.neq(null, conn, 'mongod was unable to start up');
-
- var insertWorkload = function(host, start, end) {
- var conn = new Mongo(host);
- var testDB = conn.getDB('test');
-
- // Create a record larger than 128K which is the threshold to doing an unbuffered log
- // write in WiredTiger.
- var largeString = 'a'.repeat(1024 * 128);
-
- for (var i = start; i < end; i++) {
- var doc = {_id: i, x: 0};
- // One of the bugs, WT-2696, was related to large records that used the unbuffered
- // log code. Periodically insert the large record to stress that code path.
- if (i % 30 === 0) {
- doc.x = largeString;
- }
-
- try {
- testDB.coll.insert(doc);
- } catch (e) {
- // Terminate the loop when mongod is killed.
- break;
- }
+ try {
+ testDB.coll.insert(doc);
+ } catch (e) {
+ // Terminate the loop when mongod is killed.
+ break;
}
- // Return i, the last record we were trying to insert. It is possible that mongod gets
- // killed in the middle but not finding a record at the end is okay. We're only
- // interested in records missing in the middle.
- return {start: start, end: i};
- };
-
- // Start the insert workload threads with partitioned input spaces.
- // We don't run long enough for threads to overlap. Adjust the per thread value if needed.
- var max_per_thread = 1000000;
- var num_threads = 8;
- var threads = [];
- for (var i = 0; i < num_threads; i++) {
- var t = new ScopedThread(
- insertWorkload, conn.host, i * max_per_thread, max_per_thread + (i * max_per_thread));
- threads.push(t);
- t.start();
}
-
- // Sleep for sometime less than a minute so that mongod has not yet written a checkpoint.
- // That will force WT to run recovery all the way from the beginning and we can detect missing
- // records. Sleep for 40 seconds to generate plenty of workload.
- sleep(40000);
-
- // Mongod needs an unclean shutdown so that WT recovery is forced on restart and we can detect
- // any missing records.
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
-
- // Retrieve the start and end data from each thread.
- var retData = [];
- threads.forEach(function(t) {
- t.join();
- retData.push(t.returnData());
- });
-
- // Restart the mongod. This forces WT to run recovery.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- wiredTigerEngineConfigString: 'log=(archive=false,compressor=none,file_max=10M)'
- });
- assert.neq(null, conn, 'mongod should have restarted');
-
- // Verify that every item between start and end for every thread exists in the collection now
- // that recovery has completed.
- var coll = conn.getDB('test').coll;
- for (var i = 0; i < retData.length; i++) {
- // For each start and end, verify every data item exists.
- var thread_data = retData[i];
- var absent = null;
- var missing = null;
- for (var j = thread_data.start; j <= thread_data.end; j++) {
- var idExists = coll.find({_id: j}).count() > 0;
- // The verification is a bit complex. We only want to fail if records in the middle
- // of the range are missing. Records at the end may be missing due to when mongod
- // was killed and records in memory are lost. It is only a bug if a record is missing
- // and a subsequent record exists.
- if (!idExists) {
- absent = j;
- } else if (absent !== null) {
- missing = absent;
- break;
- }
+ // Return the attempted range, ending at i, the last record we tried to insert. mongod may be
+ // killed mid-insert, so a missing record at the end is okay; we are only interested in records
+ // missing in the middle of the range.
+ return {start: start, end: i};
+};
+
+// Start the insert workload threads with partitioned, non-overlapping _id ranges.
+// The run is not long enough for any thread to exhaust its range; adjust the per-thread value if
+// needed.
+var max_per_thread = 1000000;
+var num_threads = 8;
+var threads = [];
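+// Thread i inserts _ids in the half-open range [i * max_per_thread, (i + 1) * max_per_thread).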
+for (var i = 0; i < num_threads; i++) {
+ var t = new ScopedThread(
+ insertWorkload, conn.host, i * max_per_thread, max_per_thread + (i * max_per_thread));
+ threads.push(t);
+ t.start();
+}
+
+// Sleep for some time less than a minute so that mongod has not yet written a checkpoint.
+// That will force WT to run recovery all the way from the beginning and we can detect missing
+// records. Sleep for 40 seconds to generate plenty of workload.
+sleep(40000);
+
+// Mongod needs an unclean shutdown so that WT recovery is forced on restart and we can detect
+// any missing records.
+MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+
+// Retrieve the start and end data from each thread.
+var retData = [];
+threads.forEach(function(t) {
+ t.join();
+ retData.push(t.returnData());
+});
+
+// Restart the mongod. This forces WT to run recovery.
+conn = MongoRunner.runMongod({
+ dbpath: dbpath,
+ noCleanData: true,
+ wiredTigerEngineConfigString: 'log=(archive=false,compressor=none,file_max=10M)'
+});
+assert.neq(null, conn, 'mongod should have restarted');
+
+// Verify that every item between start and end for every thread exists in the collection now
+// that recovery has completed.
+var coll = conn.getDB('test').coll;
+for (var i = 0; i < retData.length; i++) {
+ // For each start and end, verify every data item exists.
+ var thread_data = retData[i];
+ var absent = null;
+ var missing = null;
+ for (var j = thread_data.start; j <= thread_data.end; j++) {
+ var idExists = coll.find({_id: j}).count() > 0;
+ // The verification is a bit subtle: we only want to fail if records in the middle of the
+ // range are missing. Records at the end may be missing because in-memory records were lost
+ // when mongod was killed. It is only a bug if a record is missing and a subsequent record
+ // exists.
+ if (!idExists) {
+ absent = j;
+ } else if (absent !== null) {
+ missing = absent;
+ break;
}
- assert.eq(null,
- missing,
- 'Thread ' + i + ' missing id ' + missing + ' start and end for all threads: ' +
- tojson(retData));
}
+ assert.eq(null,
+ missing,
+ 'Thread ' + i + ' missing id ' + missing +
+ '; start and end for all threads: ' + tojson(retData));
+}
- MongoRunner.stopMongod(conn);
+MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/yield_during_writes.js b/jstests/noPassthrough/yield_during_writes.js
index 4d05c725659..d1e6845b58e 100644
--- a/jstests/noPassthrough/yield_during_writes.js
+++ b/jstests/noPassthrough/yield_during_writes.js
@@ -1,43 +1,43 @@
// Ensure that multi-update and multi-remove operations yield regularly.
// @tags: [requires_profiling]
(function() {
- 'use strict';
-
- function countOpYields(coll, op) {
- const profileEntry = coll.getDB()
- .system.profile.find({ns: coll.getFullName()})
- .sort({$natural: -1})
- .limit(1)
- .next();
- assert.eq(profileEntry.op, op);
- return profileEntry.numYield;
- }
-
- const nDocsToInsert = 300;
- const worksPerYield = 50;
-
- // Start a mongod that will yield every 50 work cycles.
- const mongod = MongoRunner.runMongod({
- setParameter: `internalQueryExecYieldIterations=${worksPerYield}`,
- profile: 2,
- });
- assert.neq(null, mongod, 'mongod was unable to start up');
-
- const coll = mongod.getDB('test').yield_during_writes;
- coll.drop();
-
- for (let i = 0; i < nDocsToInsert; i++) {
- assert.writeOK(coll.insert({_id: i}));
- }
-
- // A multi-update doing a collection scan should yield about nDocsToInsert / worksPerYield
- // times.
- assert.writeOK(coll.update({}, {$inc: {counter: 1}}, {multi: true}));
- assert.gt(countOpYields(coll, 'update'), (nDocsToInsert / worksPerYield) - 2);
-
- // Likewise, a multi-remove should also yield approximately every worksPerYield documents.
- assert.writeOK(coll.remove({}, {multi: true}));
- assert.gt(countOpYields(coll, 'remove'), (nDocsToInsert / worksPerYield) - 2);
-
- MongoRunner.stopMongod(mongod);
+'use strict';
+
+function countOpYields(coll, op) {
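+ // Fetch the most recent profile entry for this collection, verify it is the expected op type,
+ // and return how many times that operation yielded.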
+ const profileEntry = coll.getDB()
+ .system.profile.find({ns: coll.getFullName()})
+ .sort({$natural: -1})
+ .limit(1)
+ .next();
+ assert.eq(profileEntry.op, op);
+ return profileEntry.numYield;
+}
+
+const nDocsToInsert = 300;
+const worksPerYield = 50;
+
+// Start a mongod that will yield every 50 work cycles.
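+// Profiling level 2 records every operation, so numYield can be read back from system.profile.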
+const mongod = MongoRunner.runMongod({
+ setParameter: `internalQueryExecYieldIterations=${worksPerYield}`,
+ profile: 2,
+});
+assert.neq(null, mongod, 'mongod was unable to start up');
+
+const coll = mongod.getDB('test').yield_during_writes;
+coll.drop();
+
+for (let i = 0; i < nDocsToInsert; i++) {
+ assert.writeOK(coll.insert({_id: i}));
+}
+
+// A multi-update doing a collection scan should yield about nDocsToInsert / worksPerYield
+// times.
+assert.writeOK(coll.update({}, {$inc: {counter: 1}}, {multi: true}));
+assert.gt(countOpYields(coll, 'update'), (nDocsToInsert / worksPerYield) - 2);
+
+// Likewise, a multi-remove should also yield approximately every worksPerYield documents.
+assert.writeOK(coll.remove({}, {multi: true}));
+assert.gt(countOpYields(coll, 'remove'), (nDocsToInsert / worksPerYield) - 2);
+
+MongoRunner.stopMongod(mongod);
})();