author    clang-format-7.0.1 <adam.martin@10gen.com>  2019-07-26 18:20:35 -0400
committer ADAM David Alan Martin <adam.martin@10gen.com>  2019-07-27 11:02:23 -0400
commit    134a4083953270e8a11430395357fb70a29047ad (patch)
tree      dd428e1230e31d92b20b393dfdc17ffe7fa79cb6 /jstests/replsets
parent    1e46b5049003f427047e723ea5fab15b5a9253ca (diff)
download  mongo-134a4083953270e8a11430395357fb70a29047ad.tar.gz
SERVER-41772 Apply clang-format 7.0.1 to the codebase
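The commit message carries no body beyond the title, so for context: a tree-wide pass like this is produced by running the pinned clang-format binary over every file it owns. Below is a minimal sketch, assuming a clang-format 7.0.1 binary on PATH and a .clang-format config at the repository root; the actual run used MongoDB's own build tooling, which this does not reproduce.

    // Hypothetical Node.js driver for a tree-wide reformat like this commit's.
    // Assumes `clang-format` 7.0.1 on PATH and a .clang-format file at the
    // repo root; clang-format infers JavaScript from the .js extension.
    const {execFileSync} = require("child_process");
    const fs = require("fs");
    const path = require("path");

    // Recursively collect the .js files under a directory.
    function jsFilesUnder(dir) {
        let files = [];
        for (const entry of fs.readdirSync(dir, {withFileTypes: true})) {
            const full = path.join(dir, entry.name);
            if (entry.isDirectory()) {
                files = files.concat(jsFilesUnder(full));
            } else if (entry.name.endsWith(".js")) {
                files.push(full);
            }
        }
        return files;
    }

    for (const file of jsFilesUnder("jstests/replsets")) {
        // -i rewrites the file in place; -style=file reads the root .clang-format.
        execFileSync("clang-format", ["-i", "-style=file", file]);
    }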
Diffstat (limited to 'jstests/replsets')
-rw-r--r-- jstests/replsets/abort_in_progress_transactions_on_step_up.js | 275
-rw-r--r-- jstests/replsets/agg_write_concern_zero_batch_size.js | 114
-rw-r--r-- jstests/replsets/aggregation_write_concern.js | 86
-rw-r--r-- jstests/replsets/already_checked_out_session.js | 132
-rw-r--r-- jstests/replsets/apply_batch_only_goes_forward.js | 146
-rw-r--r-- jstests/replsets/apply_batches_totalMillis.js | 89
-rw-r--r-- jstests/replsets/apply_ops_concurrent_non_atomic_different_db.js | 14
-rw-r--r-- jstests/replsets/apply_ops_concurrent_non_atomic_same_collection.js | 14
-rw-r--r-- jstests/replsets/apply_ops_concurrent_non_atomic_same_db.js | 14
-rw-r--r-- jstests/replsets/apply_ops_create_indexes.js | 201
-rw-r--r-- jstests/replsets/apply_ops_create_view.js | 26
-rw-r--r-- jstests/replsets/apply_ops_create_with_uuid.js | 97
-rw-r--r-- jstests/replsets/apply_ops_idempotency.js | 374
-rw-r--r-- jstests/replsets/apply_ops_insert_write_conflict_atomic.js | 12
-rw-r--r-- jstests/replsets/apply_ops_insert_write_conflict_nonatomic.js | 12
-rw-r--r-- jstests/replsets/apply_ops_lastop.js | 95
-rw-r--r-- jstests/replsets/apply_ops_wc.js | 217
-rw-r--r-- jstests/replsets/apply_transaction_with_yield.js | 52
-rw-r--r-- jstests/replsets/arbiters_not_included_in_w2_wc.js | 59
-rw-r--r-- jstests/replsets/arbiters_not_included_in_w3_wc.js | 55
-rw-r--r-- jstests/replsets/auth1.js | 395
-rw-r--r-- jstests/replsets/auth2.js | 128
-rw-r--r-- jstests/replsets/auth_no_pri.js | 45
-rw-r--r-- jstests/replsets/await_replication_timeout.js | 122
-rw-r--r-- jstests/replsets/awaitdata_getmore_new_last_committed_optime.js | 198
-rw-r--r-- jstests/replsets/background_index.js | 91
-rw-r--r-- jstests/replsets/batch_write_command_wc.js | 315
-rw-r--r-- jstests/replsets/buildindexes.js | 90
-rw-r--r-- jstests/replsets/buildindexes_false_with_system_indexes.js | 137
-rw-r--r-- jstests/replsets/bulk_api_wc.js | 281
-rw-r--r-- jstests/replsets/capped_insert_order.js | 88
-rw-r--r-- jstests/replsets/catchup.js | 422
-rw-r--r-- jstests/replsets/catchup_takeover_one_high_priority.js | 166
-rw-r--r-- jstests/replsets/catchup_takeover_two_nodes_ahead.js | 96
-rw-r--r-- jstests/replsets/chaining_removal.js | 120
-rw-r--r-- jstests/replsets/change_stream_speculative_majority.js | 156
-rw-r--r-- jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js | 72
-rw-r--r-- jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js | 165
-rw-r--r-- jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js | 150
-rw-r--r-- jstests/replsets/change_stream_speculative_majority_optimized_wait.js | 122
-rw-r--r-- jstests/replsets/change_stream_speculative_majority_rollback.js | 194
-rw-r--r-- jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js | 109
-rw-r--r-- jstests/replsets/change_stream_stepdown.js | 258
-rw-r--r-- jstests/replsets/clean_shutdown_oplog_state.js | 160
-rw-r--r-- jstests/replsets/collate_id.js | 96
-rw-r--r-- jstests/replsets/command_response_operation_time.js | 103
-rw-r--r-- jstests/replsets/commands_that_write_accept_wc.js | 350
-rw-r--r-- jstests/replsets/commit_prepared_transaction_before_stable_timestamp.js | 76
-rw-r--r-- jstests/replsets/commit_transaction_initial_sync_data_already_applied.js | 122
-rw-r--r-- jstests/replsets/commit_transaction_recovery.js | 90
-rw-r--r-- jstests/replsets/dbcheck.js | 663
-rw-r--r-- jstests/replsets/dbhash_lock_acquisition.js | 169
-rw-r--r-- jstests/replsets/dbhash_read_at_cluster_time.js | 200
-rw-r--r-- jstests/replsets/dbhash_system_collections.js | 100
-rw-r--r-- jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js | 172
-rw-r--r-- jstests/replsets/ddl_ops_after_prepare_lock_failpoint.js | 249
-rw-r--r-- jstests/replsets/disallow_adding_initialized_node1.js | 127
-rw-r--r-- jstests/replsets/disallow_adding_initialized_node2.js | 139
-rw-r--r-- jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js | 86
-rw-r--r-- jstests/replsets/disconnect_on_legacy_write_to_secondary.js | 201
-rw-r--r-- jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js | 190
-rw-r--r-- jstests/replsets/drain.js | 170
-rw-r--r-- jstests/replsets/drop_collections_two_phase.js | 44
-rw-r--r-- jstests/replsets/drop_collections_two_phase_apply_ops_convert_to_capped.js | 167
-rw-r--r-- jstests/replsets/drop_collections_two_phase_apply_ops_create.js | 113
-rw-r--r-- jstests/replsets/drop_collections_two_phase_apply_ops_drop.js | 95
-rw-r--r-- jstests/replsets/drop_collections_two_phase_apply_ops_rename.js | 129
-rw-r--r-- jstests/replsets/drop_collections_two_phase_dbhash.js | 70
-rw-r--r-- jstests/replsets/drop_collections_two_phase_rename_drop_target.js | 220
-rw-r--r-- jstests/replsets/drop_collections_two_phase_step_down.js | 86
-rw-r--r-- jstests/replsets/drop_collections_two_phase_write_concern.js | 156
-rw-r--r-- jstests/replsets/drop_databases_two_phase.js | 312
-rw-r--r-- jstests/replsets/drop_db.js | 91
-rw-r--r-- jstests/replsets/drop_oplog.js | 48
-rw-r--r-- jstests/replsets/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js | 45
-rw-r--r-- jstests/replsets/election_handoff_basic.js | 30
-rw-r--r-- jstests/replsets/election_handoff_flip.js | 34
-rw-r--r-- jstests/replsets/election_handoff_higher_priority.js | 36
-rw-r--r-- jstests/replsets/election_handoff_one_unelectable.js | 32
-rw-r--r-- jstests/replsets/election_handoff_via_signal.js | 30
-rw-r--r-- jstests/replsets/emptycapped.js | 186
-rw-r--r-- jstests/replsets/failcommand_ignores_internal.js | 54
-rw-r--r-- jstests/replsets/find_and_modify_wc.js | 124
-rw-r--r-- jstests/replsets/force_sync_source_candidate.js | 63
-rw-r--r-- jstests/replsets/freeze_timeout.js | 92
-rw-r--r-- jstests/replsets/fsync_lock_read_secondaries.js | 83
-rw-r--r-- jstests/replsets/get_replication_info_helper.js | 89
-rw-r--r-- jstests/replsets/get_status.js | 36
-rw-r--r-- jstests/replsets/groupAndMapReduce.js | 2
-rw-r--r-- jstests/replsets/hang_before_releasing_transaction_oplog_hole.js | 92
-rw-r--r-- jstests/replsets/id_index_replication.js | 116
-rw-r--r-- jstests/replsets/initial_sync2.js | 1
-rw-r--r-- jstests/replsets/initial_sync_applier_error.js | 66
-rw-r--r-- jstests/replsets/initial_sync_capped_index.js | 182
-rw-r--r-- jstests/replsets/initial_sync_cloner_dups.js | 228
-rw-r--r-- jstests/replsets/initial_sync_commit_prepared_transaction.js | 208
-rw-r--r-- jstests/replsets/initial_sync_document_validation.js | 36
-rw-r--r-- jstests/replsets/initial_sync_drop_collection.js | 316
-rw-r--r-- jstests/replsets/initial_sync_during_stepdown.js | 333
-rw-r--r-- jstests/replsets/initial_sync_fail_insert_once.js | 46
-rw-r--r-- jstests/replsets/initial_sync_fcv.js | 163
-rw-r--r-- jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js | 378
-rw-r--r-- jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js | 190
-rw-r--r-- jstests/replsets/initial_sync_invalid_index_spec.js | 74
-rw-r--r-- jstests/replsets/initial_sync_invalid_views.js | 44
-rw-r--r-- jstests/replsets/initial_sync_move_forward.js | 135
-rw-r--r-- jstests/replsets/initial_sync_oplog_hole.js | 155
-rw-r--r-- jstests/replsets/initial_sync_oplog_rollover.js | 97
-rw-r--r-- jstests/replsets/initial_sync_preserves_active_txns.js | 133
-rw-r--r-- jstests/replsets/initial_sync_read_concern_no_oplog.js | 42
-rw-r--r-- jstests/replsets/initial_sync_rename_collection.js | 192
-rw-r--r-- jstests/replsets/initial_sync_replSetGetStatus.js | 172
-rw-r--r-- jstests/replsets/initial_sync_reset_oldest_timestamp_after_failed_attempt.js | 134
-rw-r--r-- jstests/replsets/initial_sync_test_fixture_test.js | 263
-rw-r--r-- jstests/replsets/initial_sync_update_missing_doc1.js | 55
-rw-r--r-- jstests/replsets/initial_sync_update_missing_doc2.js | 67
-rw-r--r-- jstests/replsets/initial_sync_update_missing_doc3.js | 88
-rw-r--r-- jstests/replsets/initial_sync_update_missing_doc_with_prepare.js | 154
-rw-r--r-- jstests/replsets/initial_sync_update_reinsert_missing_doc_with_prepare.js | 138
-rw-r--r-- jstests/replsets/initial_sync_uuid_not_found.js | 103
-rw-r--r-- jstests/replsets/initial_sync_views.js | 52
-rw-r--r-- jstests/replsets/initiate.js | 32
-rw-r--r-- jstests/replsets/inmemory_preserves_active_txns.js | 196
-rw-r--r-- jstests/replsets/interrupted_batch_insert.js | 208
-rw-r--r-- jstests/replsets/invalid_index_spec.js | 112
-rw-r--r-- jstests/replsets/ismaster1.js | 6
-rw-r--r-- jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js | 207
-rw-r--r-- jstests/replsets/kill_ttl_on_stepdown.js | 92
-rw-r--r-- jstests/replsets/kills_reads_with_prepare_conflicts_during_stepup.js | 236
-rw-r--r-- jstests/replsets/last_error_reported_after_stepdown.js | 191
-rw-r--r-- jstests/replsets/last_op_visible.js | 83
-rw-r--r-- jstests/replsets/last_vote.js | 397
-rw-r--r-- jstests/replsets/lastop.js | 175
-rw-r--r-- jstests/replsets/libs/election_handoff.js | 6
-rw-r--r-- jstests/replsets/libs/initial_sync_test.js | 1
-rw-r--r-- jstests/replsets/libs/initial_sync_update_missing_doc.js | 7
-rw-r--r-- jstests/replsets/libs/rename_across_dbs.js | 17
-rw-r--r-- jstests/replsets/libs/rollback_test.js | 1
-rw-r--r-- jstests/replsets/libs/secondary_reads_test.js | 4
-rw-r--r-- jstests/replsets/libs/tags.js | 93
-rw-r--r-- jstests/replsets/libs/two_phase_drops.js | 4
-rw-r--r-- jstests/replsets/linearizable_read_concern.js | 254
-rw-r--r-- jstests/replsets/localhost1.js | 20
-rw-r--r-- jstests/replsets/localhost2.js | 26
-rw-r--r-- jstests/replsets/localhost3.js | 20
-rw-r--r-- jstests/replsets/log_secondary_oplog_application.js | 109
-rw-r--r-- jstests/replsets/maintenance2.js | 68
-rw-r--r-- jstests/replsets/majority_writes_wait_for_all_durable_timestamp.js | 106
-rw-r--r-- jstests/replsets/maxSyncSourceLagSecs.js | 100
-rw-r--r-- jstests/replsets/minimum_visible_with_cluster_time.js | 160
-rw-r--r-- jstests/replsets/mr_nonrepl_coll_in_local_db.js | 126
-rw-r--r-- jstests/replsets/multikey_write_avoids_prepare_conflict.js | 96
-rw-r--r-- jstests/replsets/nested_apply_ops_create_indexes.js | 105
-rw-r--r-- jstests/replsets/no_disconnect_on_stepdown.js | 168
-rw-r--r-- jstests/replsets/no_flapping_during_network_partition.js | 60
-rw-r--r-- jstests/replsets/noop_write_after_read_only_txn.js | 134
-rw-r--r-- jstests/replsets/noop_writes_wait_for_write_concern.js | 448
-rw-r--r-- jstests/replsets/noop_writes_wait_for_write_concern_fcv.js | 108
-rw-r--r-- jstests/replsets/not_master_unacknowledged_write.js | 135
-rw-r--r-- jstests/replsets/opcounters_repl.js | 179
-rw-r--r-- jstests/replsets/operation_time_read_and_write_concern.js | 229
-rw-r--r-- jstests/replsets/oplog_format_create_indexes.js | 120
-rw-r--r-- jstests/replsets/oplog_replay_on_startup_with_bad_op.js | 94
-rw-r--r-- jstests/replsets/oplog_rollover.js | 223
-rw-r--r-- jstests/replsets/oplog_term.js | 51
-rw-r--r-- jstests/replsets/oplog_visibility.js | 198
-rw-r--r-- jstests/replsets/oplog_wallclock.js | 40
-rw-r--r-- jstests/replsets/optime.js | 10
-rw-r--r-- jstests/replsets/prepare_conflict_read_concern_behavior.js | 636
-rw-r--r-- jstests/replsets/prepare_failover_rollback_commit.js | 80
-rw-r--r-- jstests/replsets/prepare_prepared_transaction_wc_timeout.js | 125
-rw-r--r-- jstests/replsets/prepare_survives_primary_reconfig_failover.js | 8
-rw-r--r-- jstests/replsets/prepare_survives_reconfig_via_heartbeat_failover.js | 8
-rw-r--r-- jstests/replsets/prepare_transaction_fails_on_standalone.js | 18
-rw-r--r-- jstests/replsets/prepare_transaction_fails_with_arbiters.js | 45
-rw-r--r-- jstests/replsets/prepare_transaction_fails_without_majority_reads.js | 34
-rw-r--r-- jstests/replsets/prepare_transaction_index_build.js | 148
-rw-r--r-- jstests/replsets/prepare_transaction_read_at_cluster_time.js | 297
-rw-r--r-- jstests/replsets/prepare_transaction_survives_state_transition_to_and_from_recovering.js | 131
-rw-r--r-- jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js | 114
-rw-r--r-- jstests/replsets/prepared_transaction_on_failover.js | 251
-rw-r--r-- jstests/replsets/primary_casts_vote_on_stepdown.js | 41
-rw-r--r-- jstests/replsets/priority_takeover_cascading_priorities.js | 48
-rw-r--r-- jstests/replsets/priority_takeover_one_node_higher_priority.js | 90
-rw-r--r-- jstests/replsets/priority_takeover_two_nodes_equal_priority.js | 87
-rw-r--r-- jstests/replsets/read_after_optime.js | 152
-rw-r--r-- jstests/replsets/read_at_cluster_time_outside_transactions.js | 283
-rw-r--r-- jstests/replsets/read_committed.js | 318
-rw-r--r-- jstests/replsets/read_committed_after_rollback.js | 280
-rw-r--r-- jstests/replsets/read_committed_lookup.js | 60
-rw-r--r-- jstests/replsets/read_committed_no_snapshots.js | 123
-rw-r--r-- jstests/replsets/read_committed_on_secondary.js | 252
-rw-r--r-- jstests/replsets/read_committed_stale_history.js | 276
-rw-r--r-- jstests/replsets/read_committed_with_catalog_changes.js | 551
-rw-r--r-- jstests/replsets/read_concern_majority_getmore_secondaries.js | 120
-rw-r--r-- jstests/replsets/read_concern_uninitated_set.js | 97
-rw-r--r-- jstests/replsets/read_majority_two_arbs.js | 107
-rw-r--r-- jstests/replsets/read_operations_during_rollback.js | 186
-rw-r--r-- jstests/replsets/read_operations_during_step_down.js | 208
-rw-r--r-- jstests/replsets/reconfig.js | 83
-rw-r--r-- jstests/replsets/reconfig_during_election.js | 67
-rw-r--r-- jstests/replsets/reconstruct_prepared_transactions_initial_sync.js | 458
-rw-r--r-- jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js | 234
-rw-r--r-- jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js | 351
-rw-r--r-- jstests/replsets/reconstruct_prepared_transactions_initial_sync_on_oplog_seed.js | 154
-rw-r--r-- jstests/replsets/recover_committed_aborted_prepared_transactions.js | 225
-rw-r--r-- jstests/replsets/recover_multiple_prepared_transactions_startup.js | 284
-rw-r--r-- jstests/replsets/recover_prepared_transaction_state.js | 331
-rw-r--r-- jstests/replsets/recover_prepared_transactions_startup_secondary_application.js | 162
-rw-r--r-- jstests/replsets/recover_prepared_txn_with_multikey_write.js | 54
-rw-r--r-- jstests/replsets/recovery_after_clean_shutdown_but_not_all_writes_in_snapshot.js | 117
-rw-r--r-- jstests/replsets/recovery_preserves_active_txns.js | 155
-rw-r--r-- jstests/replsets/refresh_sessions_rs.js | 158
-rw-r--r-- jstests/replsets/rename_across_dbs.js | 6
-rw-r--r-- jstests/replsets/rename_across_dbs_drop_target.js | 10
-rw-r--r-- jstests/replsets/rename_collection_between_unrepl_and_repl.js | 52
-rw-r--r-- jstests/replsets/rename_collection_temp.js | 112
-rw-r--r-- jstests/replsets/replset1.js | 1
-rw-r--r-- jstests/replsets/replset2.js | 1
-rw-r--r-- jstests/replsets/replset3.js | 2
-rw-r--r-- jstests/replsets/replset4.js | 1
-rw-r--r-- jstests/replsets/replset5.js | 133
-rw-r--r-- jstests/replsets/replset8.js | 142
-rw-r--r-- jstests/replsets/replsetarb2.js | 88
-rw-r--r-- jstests/replsets/replsetprio1.js | 102
-rw-r--r-- jstests/replsets/replsetrestart1.js | 92
-rw-r--r-- jstests/replsets/replsets_killop.js | 8
-rw-r--r-- jstests/replsets/request_primary_stepdown.js | 51
-rw-r--r-- jstests/replsets/restore_term.js | 96
-rw-r--r-- jstests/replsets/retryable_commit_transaction_after_failover.js | 183
-rw-r--r-- jstests/replsets/retryable_commit_transaction_after_restart.js | 159
-rw-r--r-- jstests/replsets/retryable_prepared_commit_transaction_after_failover.js | 142
-rw-r--r-- jstests/replsets/retryable_write_concern.js | 463
-rw-r--r-- jstests/replsets/retryable_writes_direct_write_to_config_transactions.js | 180
-rw-r--r-- jstests/replsets/retryable_writes_failover.js | 233
-rw-r--r-- jstests/replsets/rollback_aborted_prepared_transaction.js | 194
-rw-r--r-- jstests/replsets/rollback_after_disabling_majority_reads.js | 58
-rw-r--r-- jstests/replsets/rollback_after_enabling_majority_reads.js | 114
-rw-r--r-- jstests/replsets/rollback_all_op_types.js | 608
-rw-r--r-- jstests/replsets/rollback_auth.js | 403
-rw-r--r-- jstests/replsets/rollback_capped_deletions.js | 66
-rw-r--r-- jstests/replsets/rollback_collmods.js | 202
-rw-r--r-- jstests/replsets/rollback_crud_op_sequences.js | 224
-rw-r--r-- jstests/replsets/rollback_ddl_op_sequences.js | 274
-rw-r--r-- jstests/replsets/rollback_drop_database.js | 109
-rw-r--r-- jstests/replsets/rollback_drop_index_after_rename.js | 98
-rw-r--r-- jstests/replsets/rollback_dup_ids.js | 57
-rw-r--r-- jstests/replsets/rollback_files_no_prepare_conflict.js | 64
-rw-r--r-- jstests/replsets/rollback_prepare_transaction.js | 184
-rw-r--r-- jstests/replsets/rollback_reconstructs_transactions_prepared_before_stable.js | 206
-rw-r--r-- jstests/replsets/rollback_recovery_commit_transaction_before_stable_timestamp.js | 179
-rw-r--r-- jstests/replsets/rollback_remote_cursor_retry.js | 61
-rw-r--r-- jstests/replsets/rollback_rename_collection_on_sync_source.js | 65
-rw-r--r-- jstests/replsets/rollback_rename_count.js | 82
-rw-r--r-- jstests/replsets/rollback_time_limit_param.js | 67
-rw-r--r-- jstests/replsets/rollback_transaction_table.js | 426
-rw-r--r-- jstests/replsets/rollback_transactions_count.js | 90
-rw-r--r-- jstests/replsets/rollback_unprepared_transactions.js | 108
-rw-r--r-- jstests/replsets/rollback_via_refetch_commit_transaction.js | 110
-rw-r--r-- jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js | 92
-rw-r--r-- jstests/replsets/rollback_views.js | 235
-rw-r--r-- jstests/replsets/rollback_waits_for_bgindex_completion.js | 160
-rw-r--r-- jstests/replsets/rollback_with_socket_error_then_steady_state.js | 257
-rw-r--r-- jstests/replsets/rollover_preserves_active_txns.js | 166
-rw-r--r-- jstests/replsets/rslib.js | 774
-rw-r--r-- jstests/replsets/secondary_as_sync_source.js | 118
-rw-r--r-- jstests/replsets/secondary_reads_timestamp_visibility.js | 180
-rw-r--r-- jstests/replsets/secondary_reads_unique_indexes.js | 134
-rw-r--r-- jstests/replsets/server8070.js | 274
-rw-r--r-- jstests/replsets/server_election_metrics.js | 74
-rw-r--r-- jstests/replsets/sessions_collection_auto_healing.js | 164
-rw-r--r-- jstests/replsets/shutdown.js | 42
-rw-r--r-- jstests/replsets/shutdown_primary.js | 82
-rw-r--r-- jstests/replsets/shutdown_with_prepared_transaction.js | 44
-rw-r--r-- jstests/replsets/sized_zero_capped.js | 40
-rw-r--r-- jstests/replsets/slave_delay_clean_shutdown.js | 88
-rw-r--r-- jstests/replsets/slavedelay1.js | 1
-rw-r--r-- jstests/replsets/slaveok_read_pref.js | 92
-rw-r--r-- jstests/replsets/speculative_majority_find.js | 280
-rw-r--r-- jstests/replsets/speculative_majority_supported_commands.js | 104
-rw-r--r-- jstests/replsets/speculative_read_transaction.js | 202
-rw-r--r-- jstests/replsets/speculative_transaction.js | 162
-rw-r--r-- jstests/replsets/standalone_replication_recovery_prepare_only.js | 8
-rw-r--r-- jstests/replsets/standalone_replication_recovery_prepare_with_commit.js | 8
-rw-r--r-- jstests/replsets/startParallelShell.js | 52
-rw-r--r-- jstests/replsets/startup_recovery_commit_transaction_before_stable_timestamp.js | 160
-rw-r--r-- jstests/replsets/startup_recovery_reconstructs_txn_prepared_before_stable_ts.js | 212
-rw-r--r-- jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js | 72
-rw-r--r-- jstests/replsets/step_down_during_draining.js | 229
-rw-r--r-- jstests/replsets/step_down_during_draining2.js | 311
-rw-r--r-- jstests/replsets/step_down_during_draining3.js | 231
-rw-r--r-- jstests/replsets/step_down_on_secondary.js | 236
-rw-r--r-- jstests/replsets/stepdown3.js | 89
-rw-r--r-- jstests/replsets/stepdown_catch_up_opt.js | 134
-rw-r--r-- jstests/replsets/stepdown_kill_other_ops.js | 108
-rw-r--r-- jstests/replsets/stepdown_killop.js | 118
-rw-r--r-- jstests/replsets/stepdown_long_wait_time.js | 105
-rw-r--r-- jstests/replsets/stepdown_needs_electable_secondary.js | 232
-rw-r--r-- jstests/replsets/stepdown_needs_majority.js | 174
-rw-r--r-- jstests/replsets/stepup.js | 106
-rw-r--r-- jstests/replsets/storage_commit_out_of_order.js | 108
-rw-r--r-- jstests/replsets/sync2.js | 92
-rw-r--r-- jstests/replsets/system_profile.js | 75
-rw-r--r-- jstests/replsets/system_profile_secondary.js | 34
-rw-r--r-- jstests/replsets/tags.js | 8
-rw-r--r-- jstests/replsets/tags2.js | 112
-rw-r--r-- jstests/replsets/tags_with_reconfig.js | 102
-rw-r--r-- jstests/replsets/temp_namespace_restart_as_standalone.js | 163
-rw-r--r-- jstests/replsets/test_command.js | 255
-rw-r--r-- jstests/replsets/too_stale_secondary.js | 186
-rw-r--r-- jstests/replsets/transaction_table_multi_statement_txn.js | 69
-rw-r--r-- jstests/replsets/transaction_table_oplog_replay.js | 373
-rw-r--r-- jstests/replsets/transactions_after_rollback_via_refetch.js | 223
-rw-r--r-- jstests/replsets/transactions_committed_with_tickets_exhausted.js | 147
-rw-r--r-- jstests/replsets/transactions_during_step_down.js | 242
-rw-r--r-- jstests/replsets/transactions_on_secondaries_not_allowed.js | 135
-rw-r--r-- jstests/replsets/transactions_only_allowed_on_primaries.js | 242
-rw-r--r-- jstests/replsets/transactions_reaped_with_tickets_exhausted.js | 137
-rw-r--r-- jstests/replsets/transactions_wait_for_write_concern.js | 376
-rw-r--r-- jstests/replsets/transient_txn_error_labels.js | 474
-rw-r--r-- jstests/replsets/transient_txn_error_labels_with_write_concern.js | 236
-rw-r--r-- jstests/replsets/two_nodes_priority_take_over.js | 2
-rw-r--r-- jstests/replsets/txn_override_unittests.js | 3704
-rw-r--r-- jstests/replsets/unconditional_step_down.js | 386
-rw-r--r-- jstests/replsets/uninitialized_fcv_access.js | 41
-rw-r--r-- jstests/replsets/update_commit_point_from_sync_source_ignores_term.js | 139
-rw-r--r-- jstests/replsets/user_management_wc.js | 262
-rw-r--r-- jstests/replsets/verify_sessions_expiration_rs.js | 222
-rw-r--r-- jstests/replsets/view_catalog_oplog_entries.js | 62
-rw-r--r-- jstests/replsets/write_concern_after_stepdown.js | 190
-rw-r--r-- jstests/replsets/write_concern_after_stepdown_and_stepup.js | 212
331 files changed, 25667 insertions, 25847 deletions
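Nearly every hunk that follows is the same mechanical change: clang-format 7.0.1 stops indenting statements inside the (function() { ... })() wrapper that encloses each test, so whole file bodies shift four spaces left and some multi-line expressions and object literals are rewrapped, which is why insertions (25667) and deletions (25847) nearly balance. A condensed, invented illustration of the pattern (ReplSetTest comes from the shell test harness; this snippet is not taken from any file below):

    // Before: the pre-7.0.1 style indents the body of the enclosing IIFE.
    (function() {
        "use strict";
        const rst = new ReplSetTest({nodes: 2});
        rst.startSet();
        rst.initiate();
        rst.stopSet();
    }());

    // After clang-format 7.0.1: the wrapper no longer adds an indent level.
    (function() {
    "use strict";
    const rst = new ReplSetTest({nodes: 2});
    rst.startSet();
    rst.initiate();
    rst.stopSet();
    }());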
diff --git a/jstests/replsets/abort_in_progress_transactions_on_step_up.js b/jstests/replsets/abort_in_progress_transactions_on_step_up.js
index ef2dda4cf80..a47baee071a 100644
--- a/jstests/replsets/abort_in_progress_transactions_on_step_up.js
+++ b/jstests/replsets/abort_in_progress_transactions_on_step_up.js
@@ -4,139 +4,144 @@
* @tags: [uses_transactions, exclude_from_large_txns]
*/
(function() {
- "use strict";
- load("jstests/replsets/rslib.js"); // For reconnect()
- load("jstests/libs/check_log.js");
-
- function getTxnTableEntry(db) {
- let txnTableEntries = db.getSiblingDB("config")["transactions"].find().toArray();
- assert.eq(txnTableEntries.length, 1);
- return txnTableEntries[0];
- }
-
- const replTest = new ReplSetTest({
- nodes: 3,
- nodeOptions: {
- setParameter: {
- maxNumberOfTransactionOperationsInSingleOplogEntry: 1,
- bgSyncOplogFetcherBatchSize: 1
- }
- },
- });
-
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- // Disable primary catchup and chaining.
- config.settings = {catchUpTimeoutMillis: 0, chainingAllowed: false};
- replTest.initiate(config);
-
- setLogVerbosity(replTest.nodes, {"replication": {"verbosity": 3}});
-
- const dbName = jsTest.name();
- const collName = "coll";
-
- const primary = replTest.nodes[0];
- const testDB = primary.getDB(dbName);
- const newPrimary = replTest.nodes[1];
- const newTestDB = newPrimary.getDB(dbName);
-
- testDB.dropDatabase();
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Prevent the priority: 0 node from fetching new ops so that it can vote for the new primary.
- assert.commandWorked(
- replTest.nodes[2].adminCommand({configureFailPoint: 'stopReplProducer', mode: 'alwaysOn'}));
-
- jsTest.log("Stop secondary oplog replication before the last operation in the transaction.");
- // The stopReplProducerOnDocument failpoint ensures that secondary stops replicating before
- // applying the last operation in the transaction. This depends on the oplog fetcher batch size
- // being 1.
- assert.commandWorked(newPrimary.adminCommand({
- configureFailPoint: "stopReplProducerOnDocument",
- mode: "alwaysOn",
- data: {document: {"applyOps.o._id": "last in txn"}}
- }));
-
- jsTestLog("Starting transaction");
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- session.startTransaction({writeConcern: {w: "majority", wtimeout: 500}});
-
- const doc = {_id: "first in txn on primary " + primary};
- assert.commandWorked(sessionDB.getCollection(collName).insert(doc));
- assert.commandWorked(sessionDB.getCollection(collName).insert({_id: "last in txn"}));
-
- jsTestLog("Committing transaction but fail on replication");
- let res = session.commitTransaction_forTesting();
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
-
- // Remember the commit OpTime on primary.
- let txnTableEntry = getTxnTableEntry(testDB);
- assert.eq(txnTableEntry.state, "committed");
- const commitOpTime = getTxnTableEntry(testDB).lastWriteOpTime;
-
- jsTestLog("Wait for the new primary to block on fail point.");
- checkLog.contains(newPrimary, "stopReplProducerOnDocument fail point is enabled.");
-
- // Now the transaction should be in-progress on newPrimary.
- txnTableEntry = getTxnTableEntry(newTestDB);
- assert.eq(txnTableEntry.state, "inProgress");
- // The startOpTime should be less than the commit optime.
- assert.eq(rs.compareOpTimes(txnTableEntry.startOpTime, commitOpTime), -1);
-
- jsTestLog("Stepping down primary via heartbeat.");
- assert.commandWorked(newPrimary.adminCommand({replSetStepUp: 1}));
- replTest.awaitNodesAgreeOnPrimary();
- reconnect(primary);
-
- // Make sure we won't apply the whole transaction by any chance.
- jsTestLog("Wait for the new primary to stop replication after primary catch-up.");
- checkLog.contains(newPrimary, "Stopping replication producer");
-
- jsTestLog("Enable replication on the new primary so that it can finish state transition");
- assert.commandWorked(newPrimary.adminCommand({
- configureFailPoint: "stopReplProducerOnDocument",
- mode: "off",
- }));
-
- assert.eq(replTest.getPrimary(), newPrimary);
- assert.commandWorked(
- replTest.nodes[2].adminCommand({configureFailPoint: 'stopReplProducer', mode: 'off'}));
- replTest.awaitReplication();
-
- jsTestLog("The transaction has been aborted on the new primary.");
- // Create a proxy session to reuse the session state of the old primary.
- const newSession = new _DelegatingDriverSession(newPrimary, session);
- const newSessionDB = newSession.getDatabase(dbName);
- // The transaction has been aborted.
- assert.commandFailedWithCode(newSessionDB.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(newSession.getTxnNumber_forTesting()),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.NoSuchTransaction);
-
- // The old primary rolls back the local committed transaction.
- assert.eq(testDB.getCollection(collName).find().itcount(), 0);
- assert.eq(newTestDB.getCollection(collName).find().itcount(), 0);
-
- // The transaction table should be the same on both old and new primaries.
- txnTableEntry = getTxnTableEntry(newTestDB);
- assert.eq(txnTableEntry.state, "aborted");
- assert(!txnTableEntry.hasOwnProperty("startOpTime"));
- txnTableEntry = getTxnTableEntry(testDB);
- assert.eq(txnTableEntry.state, "aborted");
- assert(!txnTableEntry.hasOwnProperty("startOpTime"));
-
- jsTestLog("Running another transaction on the new primary");
- newSession.startTransaction({writeConcern: {w: 3}});
- const secondDoc = {_id: "second-doc"};
- assert.commandWorked(newSession.getDatabase(dbName).getCollection(collName).insert(secondDoc));
- assert.commandWorked(newSession.commitTransaction_forTesting());
- assert.docEq(testDB.getCollection(collName).find().toArray(), [secondDoc]);
- assert.docEq(newTestDB.getCollection(collName).find().toArray(), [secondDoc]);
-
- replTest.stopSet();
+"use strict";
+load("jstests/replsets/rslib.js"); // For reconnect()
+load("jstests/libs/check_log.js");
+
+function getTxnTableEntry(db) {
+ let txnTableEntries = db.getSiblingDB("config")["transactions"].find().toArray();
+ assert.eq(txnTableEntries.length, 1);
+ return txnTableEntries[0];
+}
+
+const replTest = new ReplSetTest({
+ nodes: 3,
+ nodeOptions: {
+ setParameter:
+ {maxNumberOfTransactionOperationsInSingleOplogEntry: 1, bgSyncOplogFetcherBatchSize: 1}
+ },
+});
+
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+// Disable primary catchup and chaining.
+config.settings = {
+ catchUpTimeoutMillis: 0,
+ chainingAllowed: false
+};
+replTest.initiate(config);
+
+setLogVerbosity(replTest.nodes, {"replication": {"verbosity": 3}});
+
+const dbName = jsTest.name();
+const collName = "coll";
+
+const primary = replTest.nodes[0];
+const testDB = primary.getDB(dbName);
+const newPrimary = replTest.nodes[1];
+const newTestDB = newPrimary.getDB(dbName);
+
+testDB.dropDatabase();
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Prevent the priority: 0 node from fetching new ops so that it can vote for the new primary.
+assert.commandWorked(
+ replTest.nodes[2].adminCommand({configureFailPoint: 'stopReplProducer', mode: 'alwaysOn'}));
+
+jsTest.log("Stop secondary oplog replication before the last operation in the transaction.");
+// The stopReplProducerOnDocument failpoint ensures that secondary stops replicating before
+// applying the last operation in the transaction. This depends on the oplog fetcher batch size
+// being 1.
+assert.commandWorked(newPrimary.adminCommand({
+ configureFailPoint: "stopReplProducerOnDocument",
+ mode: "alwaysOn",
+ data: {document: {"applyOps.o._id": "last in txn"}}
+}));
+
+jsTestLog("Starting transaction");
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+session.startTransaction({writeConcern: {w: "majority", wtimeout: 500}});
+
+const doc = {
+ _id: "first in txn on primary " + primary
+};
+assert.commandWorked(sessionDB.getCollection(collName).insert(doc));
+assert.commandWorked(sessionDB.getCollection(collName).insert({_id: "last in txn"}));
+
+jsTestLog("Committing transaction but fail on replication");
+let res = session.commitTransaction_forTesting();
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+
+// Remember the commit OpTime on primary.
+let txnTableEntry = getTxnTableEntry(testDB);
+assert.eq(txnTableEntry.state, "committed");
+const commitOpTime = getTxnTableEntry(testDB).lastWriteOpTime;
+
+jsTestLog("Wait for the new primary to block on fail point.");
+checkLog.contains(newPrimary, "stopReplProducerOnDocument fail point is enabled.");
+
+// Now the transaction should be in-progress on newPrimary.
+txnTableEntry = getTxnTableEntry(newTestDB);
+assert.eq(txnTableEntry.state, "inProgress");
+// The startOpTime should be less than the commit optime.
+assert.eq(rs.compareOpTimes(txnTableEntry.startOpTime, commitOpTime), -1);
+
+jsTestLog("Stepping down primary via heartbeat.");
+assert.commandWorked(newPrimary.adminCommand({replSetStepUp: 1}));
+replTest.awaitNodesAgreeOnPrimary();
+reconnect(primary);
+
+// Make sure we won't apply the whole transaction by any chance.
+jsTestLog("Wait for the new primary to stop replication after primary catch-up.");
+checkLog.contains(newPrimary, "Stopping replication producer");
+
+jsTestLog("Enable replication on the new primary so that it can finish state transition");
+assert.commandWorked(newPrimary.adminCommand({
+ configureFailPoint: "stopReplProducerOnDocument",
+ mode: "off",
+}));
+
+assert.eq(replTest.getPrimary(), newPrimary);
+assert.commandWorked(
+ replTest.nodes[2].adminCommand({configureFailPoint: 'stopReplProducer', mode: 'off'}));
+replTest.awaitReplication();
+
+jsTestLog("The transaction has been aborted on the new primary.");
+// Create a proxy session to reuse the session state of the old primary.
+const newSession = new _DelegatingDriverSession(newPrimary, session);
+const newSessionDB = newSession.getDatabase(dbName);
+// The transaction has been aborted.
+assert.commandFailedWithCode(newSessionDB.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(newSession.getTxnNumber_forTesting()),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.NoSuchTransaction);
+
+// The old primary rolls back the local committed transaction.
+assert.eq(testDB.getCollection(collName).find().itcount(), 0);
+assert.eq(newTestDB.getCollection(collName).find().itcount(), 0);
+
+// The transaction table should be the same on both old and new primaries.
+txnTableEntry = getTxnTableEntry(newTestDB);
+assert.eq(txnTableEntry.state, "aborted");
+assert(!txnTableEntry.hasOwnProperty("startOpTime"));
+txnTableEntry = getTxnTableEntry(testDB);
+assert.eq(txnTableEntry.state, "aborted");
+assert(!txnTableEntry.hasOwnProperty("startOpTime"));
+
+jsTestLog("Running another transaction on the new primary");
+newSession.startTransaction({writeConcern: {w: 3}});
+const secondDoc = {
+ _id: "second-doc"
+};
+assert.commandWorked(newSession.getDatabase(dbName).getCollection(collName).insert(secondDoc));
+assert.commandWorked(newSession.commitTransaction_forTesting());
+assert.docEq(testDB.getCollection(collName).find().toArray(), [secondDoc]);
+assert.docEq(newTestDB.getCollection(collName).find().toArray(), [secondDoc]);
+
+replTest.stopSet();
})();
diff --git a/jstests/replsets/agg_write_concern_zero_batch_size.js b/jstests/replsets/agg_write_concern_zero_batch_size.js
index c5b64999166..7e9d91c70c7 100644
--- a/jstests/replsets/agg_write_concern_zero_batch_size.js
+++ b/jstests/replsets/agg_write_concern_zero_batch_size.js
@@ -1,73 +1,73 @@
// Tests that an aggregate sent with batchSize: 0 will still obey the write concern sent on the
// original request, even though the writes happen in the getMore.
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/merge_helpers.js"); // For withEachKindOfWriteStage.
- load("jstests/libs/write_concern_util.js"); // For [stop|restart]ServerReplication.
+load("jstests/aggregation/extras/merge_helpers.js"); // For withEachKindOfWriteStage.
+load("jstests/libs/write_concern_util.js"); // For [stop|restart]ServerReplication.
- // Start a replica set with two nodes: one with the default configuration and one with priority
- // zero to ensure we don't have any elections.
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+// Start a replica set with two nodes: one with the default configuration and one with priority
+// zero to ensure we don't have any elections.
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const testDB = rst.getPrimary().getDB("test");
- const source = testDB.agg_write_concern_zero_batch_size;
- const target = testDB.agg_write_concern_zero_batch_size_target;
- assert.commandWorked(source.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
+const testDB = rst.getPrimary().getDB("test");
+const source = testDB.agg_write_concern_zero_batch_size;
+const target = testDB.agg_write_concern_zero_batch_size_target;
+assert.commandWorked(source.insert([{_id: 0}, {_id: 1}, {_id: 2}]));
- withEachKindOfWriteStage(target, (stageSpec) => {
- assert.commandWorked(target.remove({}));
+withEachKindOfWriteStage(target, (stageSpec) => {
+ assert.commandWorked(target.remove({}));
- // Start an aggregate cursor with a writing stage, but use batchSize: 0 to prevent any
- // writes from happening in this command.
- const response = assert.commandWorked(testDB.runCommand({
- aggregate: source.getName(),
- pipeline: [stageSpec],
- writeConcern: {w: 2, wtimeout: 100},
- cursor: {batchSize: 0}
- }));
- assert.neq(response.cursor.id, 0);
+ // Start an aggregate cursor with a writing stage, but use batchSize: 0 to prevent any
+ // writes from happening in this command.
+ const response = assert.commandWorked(testDB.runCommand({
+ aggregate: source.getName(),
+ pipeline: [stageSpec],
+ writeConcern: {w: 2, wtimeout: 100},
+ cursor: {batchSize: 0}
+ }));
+ assert.neq(response.cursor.id, 0);
- stopServerReplication(rst.getSecondary());
+ stopServerReplication(rst.getSecondary());
- const getMoreResponse = assert.commandFailedWithCode(
- testDB.runCommand({getMore: response.cursor.id, collection: source.getName()}),
- ErrorCodes.WriteConcernFailed);
+ const getMoreResponse = assert.commandFailedWithCode(
+ testDB.runCommand({getMore: response.cursor.id, collection: source.getName()}),
+ ErrorCodes.WriteConcernFailed);
- // Test the same thing but using the shell helpers.
- let error = assert.throws(
- () => source
- .aggregate([stageSpec],
- {cursor: {batchSize: 0}, writeConcern: {w: 2, wtimeout: 100}})
- .itcount());
- // Unfortunately this is the best way we have to check that the cause of the failure was due
- // to write concern. The aggregate shell helper will assert the command worked. When this
- // fails (as we expect due to write concern) it will create a new error object which loses
- // all structure and just preserves the information as text.
- assert(error instanceof Error);
- assert(tojson(error).indexOf("writeConcernError") != -1, tojson(error));
+ // Test the same thing but using the shell helpers.
+ let error = assert.throws(
+ () => source
+ .aggregate([stageSpec],
+ {cursor: {batchSize: 0}, writeConcern: {w: 2, wtimeout: 100}})
+ .itcount());
+ // Unfortunately this is the best way we have to check that the cause of the failure was due
+ // to write concern. The aggregate shell helper will assert the command worked. When this
+ // fails (as we expect due to write concern) it will create a new error object which loses
+ // all structure and just preserves the information as text.
+ assert(error instanceof Error);
+ assert(tojson(error).indexOf("writeConcernError") != -1, tojson(error));
- // Now test without batchSize just to be sure.
- error = assert.throws(
- () => source.aggregate([stageSpec], {writeConcern: {w: 2, wtimeout: 100}}));
- assert(error instanceof Error);
- assert(tojson(error).indexOf("writeConcernError") != -1, tojson(error));
+ // Now test without batchSize just to be sure.
+ error =
+ assert.throws(() => source.aggregate([stageSpec], {writeConcern: {w: 2, wtimeout: 100}}));
+ assert(error instanceof Error);
+ assert(tojson(error).indexOf("writeConcernError") != -1, tojson(error));
- // Now switch to legacy OP_GET_MORE read mode. We should get a different error indicating
- // that using writeConcern in this way is unsupported.
- source.getDB().getMongo().forceReadMode("legacy");
- error = assert.throws(
- () => source
- .aggregate([stageSpec],
- {cursor: {batchSize: 0}, writeConcern: {w: 2, wtimeout: 100}})
- .itcount());
- assert.eq(error.code, 31124);
- source.getDB().getMongo().forceReadMode("commands");
+ // Now switch to legacy OP_GET_MORE read mode. We should get a different error indicating
+ // that using writeConcern in this way is unsupported.
+ source.getDB().getMongo().forceReadMode("legacy");
+ error = assert.throws(
+ () => source
+ .aggregate([stageSpec],
+ {cursor: {batchSize: 0}, writeConcern: {w: 2, wtimeout: 100}})
+ .itcount());
+ assert.eq(error.code, 31124);
+ source.getDB().getMongo().forceReadMode("commands");
- restartServerReplication(rst.getSecondary());
- });
+ restartServerReplication(rst.getSecondary());
+});
- rst.stopSet();
+rst.stopSet();
}());
diff --git a/jstests/replsets/aggregation_write_concern.js b/jstests/replsets/aggregation_write_concern.js
index 8622fc0f113..807da4bf244 100644
--- a/jstests/replsets/aggregation_write_concern.js
+++ b/jstests/replsets/aggregation_write_concern.js
@@ -3,47 +3,47 @@
* not wait for the writeConcern specified to be satisfied.
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries,
- // restartReplicationOnSecondaries
- const name = "aggregation_write_concern";
-
- const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
-
- replTest.startSet();
- replTest.initiate();
-
- const testDB = replTest.getPrimary().getDB(name);
- const collectionName = "test";
-
- // Stop replication and perform a w: 1 write. This will block subsequent 'writeConcern:
- // majority' reads if the read command waits on writeConcern.
-
- stopReplicationOnSecondaries(replTest);
- assert.commandWorked(
- testDB.runCommand({insert: collectionName, documents: [{_id: 1}], writeConcern: {w: 1}}));
-
- // A read-only aggregation accepts the writeConcern option but does not wait for it.
- let res = assert.commandWorked(testDB.runCommand({
- aggregate: collectionName,
- pipeline: [{$match: {_id: 1}}],
- cursor: {},
- writeConcern: {w: "majority"}
- }));
- assert(res.cursor.firstBatch.length);
- assert.eq(res.cursor.firstBatch[0], {_id: 1});
-
- // An aggregation pipeline that writes will block on writeConcern.
- assert.commandFailedWithCode(testDB.runCommand({
- aggregate: collectionName,
- pipeline: [{$match: {_id: 1}}, {$out: collectionName + "_out"}],
- cursor: {},
- writeConcern: {w: "majority", wtimeout: 1000}
- }),
- ErrorCodes.WriteConcernFailed);
-
- restartReplicationOnSecondaries(replTest);
- replTest.awaitLastOpCommitted();
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries,
+ // restartReplicationOnSecondaries
+const name = "aggregation_write_concern";
+
+const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+
+replTest.startSet();
+replTest.initiate();
+
+const testDB = replTest.getPrimary().getDB(name);
+const collectionName = "test";
+
+// Stop replication and perform a w: 1 write. This will block subsequent 'writeConcern:
+// majority' reads if the read command waits on writeConcern.
+
+stopReplicationOnSecondaries(replTest);
+assert.commandWorked(
+ testDB.runCommand({insert: collectionName, documents: [{_id: 1}], writeConcern: {w: 1}}));
+
+// A read-only aggregation accepts the writeConcern option but does not wait for it.
+let res = assert.commandWorked(testDB.runCommand({
+ aggregate: collectionName,
+ pipeline: [{$match: {_id: 1}}],
+ cursor: {},
+ writeConcern: {w: "majority"}
+}));
+assert(res.cursor.firstBatch.length);
+assert.eq(res.cursor.firstBatch[0], {_id: 1});
+
+// An aggregation pipeline that writes will block on writeConcern.
+assert.commandFailedWithCode(testDB.runCommand({
+ aggregate: collectionName,
+ pipeline: [{$match: {_id: 1}}, {$out: collectionName + "_out"}],
+ cursor: {},
+ writeConcern: {w: "majority", wtimeout: 1000}
+}),
+ ErrorCodes.WriteConcernFailed);
+
+restartReplicationOnSecondaries(replTest);
+replTest.awaitLastOpCommitted();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/already_checked_out_session.js b/jstests/replsets/already_checked_out_session.js
index 3a4ae11840e..ac5a76fbfe9 100644
--- a/jstests/replsets/already_checked_out_session.js
+++ b/jstests/replsets/already_checked_out_session.js
@@ -5,85 +5,81 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/parallelTester.js");
+load("jstests/libs/parallelTester.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
- function doInsertWithSession(host, lsid, txnNumber) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
- assert.commandWorked(db.runCommand({
- insert: "mycoll",
- documents: [{_id: txnNumber}],
- lsid: {id: eval(lsid)},
- txnNumber: NumberLong(txnNumber),
- }));
- return {ok: 1};
- } catch (e) {
- print("doInsertWithSession failed with " + e.toString());
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
+function doInsertWithSession(host, lsid, txnNumber) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
+ assert.commandWorked(db.runCommand({
+ insert: "mycoll",
+ documents: [{_id: txnNumber}],
+ lsid: {id: eval(lsid)},
+ txnNumber: NumberLong(txnNumber),
+ }));
+ return {ok: 1};
+ } catch (e) {
+ print("doInsertWithSession failed with " + e.toString());
+ return {ok: 0, error: e.toString(), stack: e.stack};
}
+}
- let thread1;
- let thread2;
+let thread1;
+let thread2;
- // We fsyncLock the server so that a transaction operation will block waiting for a lock.
- assert.commandWorked(db.fsyncLock());
- try {
- // JavaScript objects backed by C++ objects (e.g. BSON values) do not serialize correctly
- // when passed through the ScopedThread constructor. To work around this behavior, we
- // instead pass a stringified form of the JavaScript object through the ScopedThread
- // constructor and use eval() to rehydrate it.
- const lsid = UUID();
- thread1 = new ScopedThread(doInsertWithSession, primary.host, tojson(lsid), 1);
- thread1.start();
+// We fsyncLock the server so that a transaction operation will block waiting for a lock.
+assert.commandWorked(db.fsyncLock());
+try {
+ // JavaScript objects backed by C++ objects (e.g. BSON values) do not serialize correctly
+ // when passed through the ScopedThread constructor. To work around this behavior, we
+ // instead pass a stringified form of the JavaScript object through the ScopedThread
+ // constructor and use eval() to rehydrate it.
+ const lsid = UUID();
+ thread1 = new ScopedThread(doInsertWithSession, primary.host, tojson(lsid), 1);
+ thread1.start();
- assert.soon(
- () => {
- const ops = db.currentOp({
- "command.insert": "mycoll",
- "command.txnNumber": {$eq: 1},
- waitingForLock: true
- });
- return ops.inprog.length === 1;
- },
- () => {
- return "insert operation with txnNumber 1 was not found: " + tojson(db.currentOp());
- });
+ assert.soon(
+ () => {
+ const ops = db.currentOp(
+ {"command.insert": "mycoll", "command.txnNumber": {$eq: 1}, waitingForLock: true});
+ return ops.inprog.length === 1;
+ },
+ () => {
+ return "insert operation with txnNumber 1 was not found: " + tojson(db.currentOp());
+ });
- thread2 = new ScopedThread(doInsertWithSession, primary.host, tojson(lsid), 2);
- thread2.start();
+ thread2 = new ScopedThread(doInsertWithSession, primary.host, tojson(lsid), 2);
+ thread2.start();
- // Run currentOp() again to ensure that thread2 has started its insert command.
- assert.soon(
- () => {
- const ops =
- db.currentOp({"command.insert": "mycoll", "command.txnNumber": {$eq: 2}});
- return ops.inprog.length === 1;
- },
- () => {
- return "insert operation with txnNumber 2 was not found: " + tojson(db.currentOp());
- });
- } finally {
- // We run the fsyncUnlock command in a finally block to avoid leaving the server fsyncLock'd
- // if the test were to fail.
- assert.commandWorked(db.fsyncUnlock());
- }
+ // Run currentOp() again to ensure that thread2 has started its insert command.
+ assert.soon(
+ () => {
+ const ops = db.currentOp({"command.insert": "mycoll", "command.txnNumber": {$eq: 2}});
+ return ops.inprog.length === 1;
+ },
+ () => {
+ return "insert operation with txnNumber 2 was not found: " + tojson(db.currentOp());
+ });
+} finally {
+ // We run the fsyncUnlock command in a finally block to avoid leaving the server fsyncLock'd
+ // if the test were to fail.
+ assert.commandWorked(db.fsyncUnlock());
+}
- thread1.join();
- thread2.join();
+thread1.join();
+thread2.join();
- assert.commandWorked(thread1.returnData());
- assert.commandWorked(thread2.returnData());
+assert.commandWorked(thread1.returnData());
+assert.commandWorked(thread2.returnData());
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js
index 2f8dc8d1ebc..a82ca0880cb 100644
--- a/jstests/replsets/apply_batch_only_goes_forward.js
+++ b/jstests/replsets/apply_batch_only_goes_forward.js
@@ -19,88 +19,90 @@
TestData.skipCheckDBHashes = true;
(function() {
- "use strict";
+"use strict";
- function tsToDate(ts) {
- return new Date(ts.getTime() * 1000);
- }
+function tsToDate(ts) {
+ return new Date(ts.getTime() * 1000);
+}
- var replTest =
- new ReplSetTest({name: "apply_batch_only_goes_forward", nodes: [{}, {}, {arbiter: true}]});
+var replTest =
+ new ReplSetTest({name: "apply_batch_only_goes_forward", nodes: [{}, {}, {arbiter: true}]});
- var nodes = replTest.startSet();
- replTest.initiate();
- var master = replTest.getPrimary();
- var mTest = master.getDB("test");
- var mLocal = master.getDB("local");
- var mMinvalid = mLocal["replset.minvalid"];
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+var mTest = master.getDB("test");
+var mLocal = master.getDB("local");
+var mMinvalid = mLocal["replset.minvalid"];
- var slave = replTest.getSecondary();
- var sTest = slave.getDB("test");
- var sLocal = slave.getDB("local");
- var sMinvalid = sLocal["replset.minvalid"];
- var stepDownSecs = 30;
- var stepDownCmd = {replSetStepDown: stepDownSecs, force: true};
+var slave = replTest.getSecondary();
+var sTest = slave.getDB("test");
+var sLocal = slave.getDB("local");
+var sMinvalid = sLocal["replset.minvalid"];
+var stepDownSecs = 30;
+var stepDownCmd = {replSetStepDown: stepDownSecs, force: true};
- // Write op
- assert.writeOK(mTest.foo.save(
- {}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- replTest.waitForState(slave, ReplSetTest.State.SECONDARY);
- assert.writeOK(mTest.foo.save(
- {}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+// Write op
+assert.writeOK(
+ mTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+replTest.waitForState(slave, ReplSetTest.State.SECONDARY);
+assert.writeOK(
+ mTest.foo.save({}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- // Set minvalid to something far in the future for the current primary, to simulate recovery.
- // Note: This is so far in the future (5 days) that it will never become secondary.
- var farFutureTS = new Timestamp(
- Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days*/), 0);
+// Set minvalid to something far in the future for the current primary, to simulate recovery.
+// Note: This is so far in the future (5 days) that it will never become secondary.
+var farFutureTS = new Timestamp(
+ Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days*/), 0);
- jsTest.log("future TS: " + tojson(farFutureTS) + ", date:" + tsToDate(farFutureTS));
- // We do an update in case there is a minvalid document on the primary already.
- // If the doc doesn't exist then upsert:true will create it, and the writeConcern ensures
- // that update returns details of the write, like whether an update or insert was performed.
- const minValidUpdate = {$set: {ts: farFutureTS}};
- jsTestLog("Current minvalid is " + tojson(mMinvalid.findOne()));
- jsTestLog("Updating minValid to: " + tojson(minValidUpdate));
- printjson(assert.writeOK(mMinvalid.update(
- {},
- minValidUpdate,
- {upsert: true, writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}})));
+jsTest.log("future TS: " + tojson(farFutureTS) + ", date:" + tsToDate(farFutureTS));
+// We do an update in case there is a minvalid document on the primary already.
+// If the doc doesn't exist then upsert:true will create it, and the writeConcern ensures
+// that update returns details of the write, like whether an update or insert was performed.
+const minValidUpdate = {
+ $set: {ts: farFutureTS}
+};
+jsTestLog("Current minvalid is " + tojson(mMinvalid.findOne()));
+jsTestLog("Updating minValid to: " + tojson(minValidUpdate));
+printjson(assert.writeOK(mMinvalid.update(
+ {},
+ minValidUpdate,
+ {upsert: true, writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}})));
- jsTest.log('Restarting primary ' + master.host +
- ' with updated minValid. This node will go into RECOVERING upon restart. ' +
- 'Secondary ' + slave.host + ' will become new primary.');
- clearRawMongoProgramOutput();
- replTest.restart(master);
- printjson(sLocal.adminCommand("isMaster"));
- replTest.waitForState(master, ReplSetTest.State.RECOVERING);
+jsTest.log('Restarting primary ' + master.host +
+ ' with updated minValid. This node will go into RECOVERING upon restart. ' +
+ 'Secondary ' + slave.host + ' will become new primary.');
+clearRawMongoProgramOutput();
+replTest.restart(master);
+printjson(sLocal.adminCommand("isMaster"));
+replTest.waitForState(master, ReplSetTest.State.RECOVERING);
- replTest.awaitNodesAgreeOnPrimary();
- // Slave is now master... Do a write to advance the optime on the primary so that it will be
- // considered as a sync source - this is more relevant to PV0 because we do not write a new
- // entry to the oplog on becoming primary.
- assert.writeOK(replTest.getPrimary().getDB("test").foo.save(
- {}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+replTest.awaitNodesAgreeOnPrimary();
+// Slave is now master... Do a write to advance the optime on the primary so that it will be
+// considered as a sync source - this is more relevant to PV0 because we do not write a new
+// entry to the oplog on becoming primary.
+assert.writeOK(replTest.getPrimary().getDB("test").foo.save(
+ {}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- // Sync source selection will log this message if it does not detect min valid in the sync
- // source candidate's oplog.
- assert.soon(function() {
- return rawMongoProgramOutput().match(
- 'it does not contain the necessary operations for us to reach a consistent state');
- });
+// Sync source selection will log this message if it does not detect min valid in the sync
+// source candidate's oplog.
+assert.soon(function() {
+ return rawMongoProgramOutput().match(
+ 'it does not contain the necessary operations for us to reach a consistent state');
+});
- assert.soon(function() {
- var mv;
- try {
- mv = mMinvalid.findOne();
- } catch (e) {
- return false;
- }
- var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) +
- " - " + tsToDate(mv.ts);
- assert.eq(farFutureTS, mv.ts, msg);
- return true;
- });
+assert.soon(function() {
+ var mv;
+ try {
+ mv = mMinvalid.findOne();
+ } catch (e) {
+ return false;
+ }
+ var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) +
+ " - " + tsToDate(mv.ts);
+ assert.eq(farFutureTS, mv.ts, msg);
+ return true;
+});
- // Shut down the set and finish the test.
- replTest.stopSet();
+// Shut down the set and finish the test.
+replTest.stopSet();
})();
diff --git a/jstests/replsets/apply_batches_totalMillis.js b/jstests/replsets/apply_batches_totalMillis.js
index 9e093211cb6..fd8b2872065 100644
--- a/jstests/replsets/apply_batches_totalMillis.js
+++ b/jstests/replsets/apply_batches_totalMillis.js
@@ -5,59 +5,58 @@
*/
(function() {
- "use strict";
-
- // Gets the value of metrics.repl.apply.batches.totalMillis.
- function getTotalMillis(node) {
- return assert.commandWorked(node.adminCommand({serverStatus: 1}))
- .metrics.repl.apply.batches.totalMillis;
- }
-
- // Do a bulk insert of documents as: {{key: 0}, {key: 1}, {key: 2}, ... , {key: num-1}}
- function performBulkInsert(coll, key, num) {
- let bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < num; i++) {
- let doc = {};
- doc[key] = i;
- bulk.insert(doc);
- }
- assert.writeOK(bulk.execute());
- rst.awaitReplication();
+"use strict";
+
+// Gets the value of metrics.repl.apply.batches.totalMillis.
+function getTotalMillis(node) {
+ return assert.commandWorked(node.adminCommand({serverStatus: 1}))
+ .metrics.repl.apply.batches.totalMillis;
+}
+
+// Do a bulk insert of documents as: {{key: 0}, {key: 1}, {key: 2}, ... , {key: num-1}}
+function performBulkInsert(coll, key, num) {
+ let bulk = coll.initializeUnorderedBulkOp();
+ for (let i = 0; i < num; i++) {
+ let doc = {};
+ doc[key] = i;
+ bulk.insert(doc);
}
+ assert.writeOK(bulk.execute());
+ rst.awaitReplication();
+}
- let name = "apply_batches_totalMillis";
- let rst = new ReplSetTest({name: name, nodes: 2});
- rst.startSet();
- rst.initiate();
-
- let primary = rst.getPrimary();
- let secondary = rst.getSecondary();
- let coll = primary.getDB(name)["foo"];
+let name = "apply_batches_totalMillis";
+let rst = new ReplSetTest({name: name, nodes: 2});
+rst.startSet();
+rst.initiate();
- // Perform an initial write on the system and ensure steady state.
- assert.writeOK(coll.insert({init: 0}));
- rst.awaitReplication();
- let baseTime = getTotalMillis(secondary);
+let primary = rst.getPrimary();
+let secondary = rst.getSecondary();
+let coll = primary.getDB(name)["foo"];
- // Introduce a small load and wait for it to be replicated.
- performBulkInsert(coll, "small", 1000);
+// Perform an initial write on the system and ensure steady state.
+assert.writeOK(coll.insert({init: 0}));
+rst.awaitReplication();
+let baseTime = getTotalMillis(secondary);
- // Record the time spent applying the small load.
- let timeAfterSmall = getTotalMillis(secondary);
- let deltaSmall = timeAfterSmall - baseTime;
+// Introduce a small load and wait for it to be replicated.
+performBulkInsert(coll, "small", 1000);
- // Insert a significantly larger load.
- performBulkInsert(coll, "large", 20000);
+// Record the time spent applying the small load.
+let timeAfterSmall = getTotalMillis(secondary);
+let deltaSmall = timeAfterSmall - baseTime;
- // Record the time spent applying the large load.
- let timeAfterLarge = getTotalMillis(secondary);
- let deltaLarge = timeAfterLarge - timeAfterSmall;
+// Insert a significantly larger load.
+performBulkInsert(coll, "large", 20000);
- jsTestLog(`Recorded deltas: {small: ${deltaSmall}ms, large: ${deltaLarge}ms}.`);
+// Record the time spent applying the large load.
+let timeAfterLarge = getTotalMillis(secondary);
+let deltaLarge = timeAfterLarge - timeAfterSmall;
- // We should have recorded at least as much time on the second load as we did on the first.
- // This is a crude comparison that is only taken to check that the timer is used correctly.
- assert(deltaLarge >= deltaSmall, "Expected a higher net totalMillis for the larger load.");
- rst.stopSet();
+jsTestLog(`Recorded deltas: {small: ${deltaSmall}ms, large: ${deltaLarge}ms}.`);
+// We should have recorded at least as much time on the second load as we did on the first.
+// This is a crude comparison, intended only to check that the timer is being used correctly.
+assert(deltaLarge >= deltaSmall, "Expected a higher net totalMillis for the larger load.");
+rst.stopSet();
})();
\ No newline at end of file
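
The getTotalMillis helper above is a plain field lookup in serverStatus output; the same counters can be read interactively. A sketch, assuming `node` is a connection to any replica set member:

    // metrics.repl.apply.batches reports the number of oplog batches applied
    // on this node and the total time spent applying them.
    const batches = assert.commandWorked(node.adminCommand({serverStatus: 1}))
                        .metrics.repl.apply.batches;
    print("applied " + batches.num + " batches in " + batches.totalMillis + " ms");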
diff --git a/jstests/replsets/apply_ops_concurrent_non_atomic_different_db.js b/jstests/replsets/apply_ops_concurrent_non_atomic_different_db.js
index 05cb6f9e996..a5444cb51cb 100644
--- a/jstests/replsets/apply_ops_concurrent_non_atomic_different_db.js
+++ b/jstests/replsets/apply_ops_concurrent_non_atomic_different_db.js
@@ -1,11 +1,11 @@
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
+load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
- new ApplyOpsConcurrentNonAtomicTest({
- ns1: 'test1.coll1',
- ns2: 'test2.coll2',
- requiresDocumentLevelConcurrency: false,
- }).run();
+new ApplyOpsConcurrentNonAtomicTest({
+ ns1: 'test1.coll1',
+ ns2: 'test2.coll2',
+ requiresDocumentLevelConcurrency: false,
+}).run();
}());
diff --git a/jstests/replsets/apply_ops_concurrent_non_atomic_same_collection.js b/jstests/replsets/apply_ops_concurrent_non_atomic_same_collection.js
index 004eeaaa52f..d6346e95a3a 100644
--- a/jstests/replsets/apply_ops_concurrent_non_atomic_same_collection.js
+++ b/jstests/replsets/apply_ops_concurrent_non_atomic_same_collection.js
@@ -1,11 +1,11 @@
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
+load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
- new ApplyOpsConcurrentNonAtomicTest({
- ns1: 'test.coll',
- ns2: 'test.coll',
- requiresDocumentLevelConcurrency: true,
- }).run();
+new ApplyOpsConcurrentNonAtomicTest({
+ ns1: 'test.coll',
+ ns2: 'test.coll',
+ requiresDocumentLevelConcurrency: true,
+}).run();
}());
diff --git a/jstests/replsets/apply_ops_concurrent_non_atomic_same_db.js b/jstests/replsets/apply_ops_concurrent_non_atomic_same_db.js
index 10f874382a5..5553aa1341a 100644
--- a/jstests/replsets/apply_ops_concurrent_non_atomic_same_db.js
+++ b/jstests/replsets/apply_ops_concurrent_non_atomic_same_db.js
@@ -1,11 +1,11 @@
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
+load('jstests/replsets/libs/apply_ops_concurrent_non_atomic.js');
- new ApplyOpsConcurrentNonAtomicTest({
- ns1: 'test.coll1',
- ns2: 'test.coll2',
- requiresDocumentLevelConcurrency: false,
- }).run();
+new ApplyOpsConcurrentNonAtomicTest({
+ ns1: 'test.coll1',
+ ns2: 'test.coll2',
+ requiresDocumentLevelConcurrency: false,
+}).run();
}());
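
The three files above differ only in the arguments passed to ApplyOpsConcurrentNonAtomicTest: different databases, the same collection, or the same database, plus whether document-level concurrency is required for the ops to interleave. The shared library is not part of this diff; a hedged sketch of the kind of non-atomic applyOps batch it presumably drives, with namespaces borrowed from the first test:

    // With allowAtomic: false the server applies each op as its own unit of
    // work rather than wrapping the whole batch in one atomic operation.
    assert.commandWorked(db.getSiblingDB("test1").createCollection("coll1"));
    assert.commandWorked(db.getSiblingDB("test2").createCollection("coll2"));
    assert.commandWorked(db.adminCommand({
        applyOps: [
            {op: "i", ns: "test1.coll1", o: {_id: 1}},
            {op: "i", ns: "test2.coll2", o: {_id: 1}},
        ],
        allowAtomic: false,
    }));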
diff --git a/jstests/replsets/apply_ops_create_indexes.js b/jstests/replsets/apply_ops_create_indexes.js
index ea97ce5fd9d..f2244efcba9 100644
--- a/jstests/replsets/apply_ops_create_indexes.js
+++ b/jstests/replsets/apply_ops_create_indexes.js
@@ -3,121 +3,114 @@
* correctly (see SERVER-31435).
*/
(function() {
- "use strict";
- let ensureIndexExists = function(testDB, collName, indexName, expectedNumIndexes) {
- let cmd = {listIndexes: collName};
- let res = testDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- let indexes = new DBCommandCursor(testDB, res).toArray();
+"use strict";
+let ensureIndexExists = function(testDB, collName, indexName, expectedNumIndexes) {
+ let cmd = {listIndexes: collName};
+ let res = testDB.runCommand(cmd);
+ assert.commandWorked(res, "could not run " + tojson(cmd));
+ let indexes = new DBCommandCursor(testDB, res).toArray();
- assert.eq(indexes.length, expectedNumIndexes);
+ assert.eq(indexes.length, expectedNumIndexes);
- let foundIndex = false;
- for (let i = 0; i < indexes.length; ++i) {
- if (indexes[i].name == indexName) {
- foundIndex = true;
- }
+ let foundIndex = false;
+ for (let i = 0; i < indexes.length; ++i) {
+ if (indexes[i].name == indexName) {
+ foundIndex = true;
}
- assert(foundIndex,
- "did not find the index '" + indexName + "' amongst the collection indexes: " +
- tojson(indexes));
- };
+ }
+ assert(foundIndex,
+ "did not find the index '" + indexName +
+ "' amongst the collection indexes: " + tojson(indexes));
+};
- let ensureOplogEntryExists = function(localDB, indexName) {
- // Make sure the oplog entry for index creation exists in the oplog.
- let cmd = {find: "oplog.rs"};
- let res = localDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- let cursor = new DBCommandCursor(localDB, res);
- let errMsg =
- "expected more data from command " + tojson(cmd) + ", with result " + tojson(res);
- assert(cursor.hasNext(), errMsg);
- let oplog = localDB.getCollection("oplog.rs");
- let query = {$and: [{"o.createIndexes": {$exists: true}}, {"o.name": indexName}]};
- let resCursor = oplog.find(query);
- assert.eq(resCursor.count(),
- 1,
- "Expected the query " + tojson(query) + " to return exactly 1 document");
- };
+let ensureOplogEntryExists = function(localDB, indexName) {
+ // Make sure the oplog entry for index creation exists in the oplog.
+ let cmd = {find: "oplog.rs"};
+ let res = localDB.runCommand(cmd);
+ assert.commandWorked(res, "could not run " + tojson(cmd));
+ let cursor = new DBCommandCursor(localDB, res);
+ let errMsg = "expected more data from command " + tojson(cmd) + ", with result " + tojson(res);
+ assert(cursor.hasNext(), errMsg);
+ let oplog = localDB.getCollection("oplog.rs");
+ let query = {$and: [{"o.createIndexes": {$exists: true}}, {"o.name": indexName}]};
+ let resCursor = oplog.find(query);
+ assert.eq(resCursor.count(),
+ 1,
+ "Expected the query " + tojson(query) + " to return exactly 1 document");
+};
- let rst = new ReplSetTest({nodes: 3});
- rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({nodes: 3});
+rst.startSet();
+rst.initiate();
- let collName = "create_indexes_col";
- let dbName = "create_indexes_db";
+let collName = "create_indexes_col";
+let dbName = "create_indexes_db";
- let primaryTestDB = rst.getPrimary().getDB(dbName);
- let cmd = {"create": collName};
- let res = primaryTestDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- rst.awaitReplication();
+let primaryTestDB = rst.getPrimary().getDB(dbName);
+let cmd = {"create": collName};
+let res = primaryTestDB.runCommand(cmd);
+assert.commandWorked(res, "could not run " + tojson(cmd));
+rst.awaitReplication();
- // Create an index via the applyOps command with the createIndexes command format and make sure
- // it exists.
- let uuid = primaryTestDB.getCollectionInfos()[0].info.uuid;
- let cmdFormatIndexNameA = "a_1";
- cmd = {
- applyOps: [{
- op: "c",
- ns: dbName + "." + collName,
- ui: uuid,
- o: {createIndexes: collName, v: 2, key: {a: 1}, name: cmdFormatIndexNameA}
- }]
- };
- res = primaryTestDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- rst.awaitReplication();
- ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameA, 2);
+// Create an index via the applyOps command with the createIndexes command format and make sure
+// it exists.
+let uuid = primaryTestDB.getCollectionInfos()[0].info.uuid;
+let cmdFormatIndexNameA = "a_1";
+cmd = {
+ applyOps: [{
+ op: "c",
+ ns: dbName + "." + collName,
+ ui: uuid,
+ o: {createIndexes: collName, v: 2, key: {a: 1}, name: cmdFormatIndexNameA}
+ }]
+};
+res = primaryTestDB.runCommand(cmd);
+assert.commandWorked(res, "could not run " + tojson(cmd));
+rst.awaitReplication();
+ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameA, 2);
- // Same as directly above, but ensure that applyOps createIndexes can work without a uuid.
- let cmdFormatIndexNameB = "b_1";
- cmd = {
- applyOps: [{
- op: "c",
- ns: dbName + "." + collName,
- o: {createIndexes: collName, v: 2, key: {b: 1}, name: cmdFormatIndexNameB}
- }]
- };
- res = primaryTestDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- rst.awaitReplication();
- ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameB, 3);
+// Same as directly above, but ensure that applyOps createIndexes can work without a uuid.
+let cmdFormatIndexNameB = "b_1";
+cmd = {
+ applyOps: [{
+ op: "c",
+ ns: dbName + "." + collName,
+ o: {createIndexes: collName, v: 2, key: {b: 1}, name: cmdFormatIndexNameB}
+ }]
+};
+res = primaryTestDB.runCommand(cmd);
+assert.commandWorked(res, "could not run " + tojson(cmd));
+rst.awaitReplication();
+ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameB, 3);
- // Test with a background index.
- let cmdFormatIndexNameC = "c_1";
- cmd = {
- applyOps: [{
- op: "c",
- ns: dbName + "." + collName,
- ui: uuid,
- o: {
- createIndexes: collName,
- v: 2,
- key: {c: 1},
- name: cmdFormatIndexNameC,
- background: true
- }
- }]
- };
- assert.commandWorked(primaryTestDB.runCommand(cmd));
- rst.awaitReplication();
- ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameC, 4);
+// Test with a background index.
+let cmdFormatIndexNameC = "c_1";
+cmd = {
+ applyOps: [{
+ op: "c",
+ ns: dbName + "." + collName,
+ ui: uuid,
+ o: {createIndexes: collName, v: 2, key: {c: 1}, name: cmdFormatIndexNameC, background: true}
+ }]
+};
+assert.commandWorked(primaryTestDB.runCommand(cmd));
+rst.awaitReplication();
+ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameC, 4);
- let localDB = rst.getPrimary().getDB("local");
- ensureOplogEntryExists(localDB, cmdFormatIndexNameA);
- ensureOplogEntryExists(localDB, cmdFormatIndexNameB);
- ensureOplogEntryExists(localDB, cmdFormatIndexNameC);
+let localDB = rst.getPrimary().getDB("local");
+ensureOplogEntryExists(localDB, cmdFormatIndexNameA);
+ensureOplogEntryExists(localDB, cmdFormatIndexNameB);
+ensureOplogEntryExists(localDB, cmdFormatIndexNameC);
- // Make sure the indexes were replicated to the secondaries.
- rst.waitForAllIndexBuildsToFinish(dbName, collName);
- let secondaries = rst.getSecondaries();
- for (let j = 0; j < secondaries.length; j++) {
- let secondaryTestDB = secondaries[j].getDB(dbName);
- ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameA, 4);
- ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameB, 4);
- ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameC, 4);
- }
+// Make sure the indexes were replicated to the secondaries.
+rst.waitForAllIndexBuildsToFinish(dbName, collName);
+let secondaries = rst.getSecondaries();
+for (let j = 0; j < secondaries.length; j++) {
+ let secondaryTestDB = secondaries[j].getDB(dbName);
+ ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameA, 4);
+ ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameB, 4);
+ ensureIndexExists(secondaryTestDB, collName, cmdFormatIndexNameC, 4);
+}
- rst.stopSet();
+rst.stopSet();
}());
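
ensureIndexExists above walks a listIndexes cursor by hand; in the shell the same check can be phrased through getIndexes(), which wraps the cursor. A sketch reusing primaryTestDB and collName from the test:

    // getIndexes() returns the full array of index specs for the collection.
    const specs = primaryTestDB[collName].getIndexes();
    assert(specs.some(spec => spec.name === "a_1"),
           "missing index a_1; found: " + tojson(specs));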
diff --git a/jstests/replsets/apply_ops_create_view.js b/jstests/replsets/apply_ops_create_view.js
index 0aac71ee490..9535790f5a9 100644
--- a/jstests/replsets/apply_ops_create_view.js
+++ b/jstests/replsets/apply_ops_create_view.js
@@ -1,19 +1,21 @@
(function() {
- // Test applyOps behavior for view creation.
- "use strict";
+// Test applyOps behavior for view creation.
+"use strict";
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const db = replTest.getPrimary().getDB('test');
+const db = replTest.getPrimary().getDB('test');
- assert.commandWorked(db.createCollection("bar"));
- assert.writeOK(db.bar.insert({a: 1, b: "hi"}));
+assert.commandWorked(db.createCollection("bar"));
+assert.writeOK(db.bar.insert({a: 1, b: "hi"}));
- const cmd = {applyOps: [{op: "c", ns: db + ".$cmd", o: {create: "foo", viewOn: "bar"}}]};
- assert.commandWorked(db.runCommand(cmd), tojson(cmd));
- assert.eq(db.foo.findOne({a: 1}).b, "hi");
+const cmd = {
+ applyOps: [{op: "c", ns: db + ".$cmd", o: {create: "foo", viewOn: "bar"}}]
+};
+assert.commandWorked(db.runCommand(cmd), tojson(cmd));
+assert.eq(db.foo.findOne({a: 1}).b, "hi");
- replTest.stopSet();
+replTest.stopSet();
}());
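
The applyOps entry above creates foo as a view on bar, and the trailing find is served through the view. For comparison, the shell's direct helper does the same thing without applyOps; the view name here is hypothetical:

    // createView(name, viewOn, pipeline): an empty pipeline makes the view a
    // pass-through, so reads on the view are rewritten into reads on 'bar'.
    assert.commandWorked(db.createView("foo2", "bar", []));
    assert.eq(db.foo2.findOne({a: 1}).b, "hi");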
diff --git a/jstests/replsets/apply_ops_create_with_uuid.js b/jstests/replsets/apply_ops_create_with_uuid.js
index d3f69fc2a71..258fd0b6525 100644
--- a/jstests/replsets/apply_ops_create_with_uuid.js
+++ b/jstests/replsets/apply_ops_create_with_uuid.js
@@ -1,51 +1,50 @@
(function() {
- // Test applyOps behavior for collection creation with explicit UUIDs.
- "use strict";
-
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- const db = replTest.getPrimary().getDB('test');
-
- const uuid = UUID();
- // Two applyOps to create a foo collection with given uuid, one each for 'test' and 'test2' dbs.
- var ops = (uuid => ["test", "test2"].map(db => {
- return {op: "c", ns: db + ".$cmd", ui: uuid, o: {create: "foo"}};
- }))(uuid);
-
- function checkUUID(coll, uuid) {
- const cmd = {listCollections: 1, filter: {name: coll}};
- const res = assert.commandWorked(db.runCommand(cmd), tojson(cmd));
- assert.eq(res.cursor.firstBatch[0].info.uuid,
- uuid,
- tojson(cmd) + " did not return expected uuid: " + tojson(res));
- }
-
- jsTestLog("Create a test.foo collection with uuid " + uuid + " through applyOps.");
- let cmd = {applyOps: [ops[0]]};
- let res = assert.commandWorked(db.runCommand(cmd), tojson(cmd));
-
- // Check that test.foo has the expected UUID.
- jsTestLog("Check that test.foo has UUID " + uuid);
- checkUUID("foo", uuid);
-
- // Change the ops to refer to bar, instead of foo. Command should still work, renaming the
- // collection. Second command should fail as it tries to associate the "test2.foo" name with
- // an existing collection in the "test" database. This must fail.
- jsTestLog("Create test.bar and try to create test2.foo collections with the same UUID.");
- ops[0].o.create = "bar";
- res = assert.commandFailed(db.runCommand({applyOps: ops}));
- assert.eq(res.results,
- [true, false],
- "expected first operation " + tojson(ops[0]) + " to succeed, and second operation " +
- tojson(ops[1]) + " to fail, got " + tojson(res));
-
- jsTestLog("Check that test.bar has UUID " + uuid);
- checkUUID("bar", uuid);
- jsTestLog("Check that test.foo no longer exists");
- assert.eq(db.getCollectionInfos({name: "foo"}).length,
- 0,
- "expected foo collection to no longer exist");
- replTest.stopSet();
+// Test applyOps behavior for collection creation with explicit UUIDs.
+"use strict";
+
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+const db = replTest.getPrimary().getDB('test');
+
+const uuid = UUID();
+// Two applyOps entries to create a foo collection with the given uuid, one each for the
+// 'test' and 'test2' dbs.
+var ops = (uuid => ["test", "test2"].map(db => {
+ return {op: "c", ns: db + ".$cmd", ui: uuid, o: {create: "foo"}};
+}))(uuid);
+
+function checkUUID(coll, uuid) {
+ const cmd = {listCollections: 1, filter: {name: coll}};
+ const res = assert.commandWorked(db.runCommand(cmd), tojson(cmd));
+ assert.eq(res.cursor.firstBatch[0].info.uuid,
+ uuid,
+ tojson(cmd) + " did not return expected uuid: " + tojson(res));
+}
+
+jsTestLog("Create a test.foo collection with uuid " + uuid + " through applyOps.");
+let cmd = {applyOps: [ops[0]]};
+let res = assert.commandWorked(db.runCommand(cmd), tojson(cmd));
+
+// Check that test.foo has the expected UUID.
+jsTestLog("Check that test.foo has UUID " + uuid);
+checkUUID("foo", uuid);
+
+// Change the ops to refer to bar instead of foo. The first op should still succeed, renaming
+// the collection. The second op should fail, as it tries to associate the "test2.foo" name
+// with an existing collection in the "test" database.
+jsTestLog("Create test.bar and try to create test2.foo collections with the same UUID.");
+ops[0].o.create = "bar";
+res = assert.commandFailed(db.runCommand({applyOps: ops}));
+assert.eq(res.results,
+ [true, false],
+ "expected first operation " + tojson(ops[0]) + " to succeed, and second operation " +
+ tojson(ops[1]) + " to fail, got " + tojson(res));
+
+jsTestLog("Check that test.bar has UUID " + uuid);
+checkUUID("bar", uuid);
+jsTestLog("Check that test.foo no longer exists");
+assert.eq(
+ db.getCollectionInfos({name: "foo"}).length, 0, "expected foo collection to no longer exist");
+replTest.stopSet();
}());
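
checkUUID above depends on listCollections reporting each collection's catalog UUID under info.uuid. The same lookup is useful on its own, e.g. when building the ui field of an applyOps entry; a sketch with an illustrative helper name:

    // Fetch the UUID the catalog has recorded for a collection.
    function getUUID(mydb, collName) {
        const res = assert.commandWorked(
            mydb.runCommand({listCollections: 1, filter: {name: collName}}));
        return res.cursor.firstBatch[0].info.uuid;
    }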
diff --git a/jstests/replsets/apply_ops_idempotency.js b/jstests/replsets/apply_ops_idempotency.js
index 7173bbd0301..1a3167bdbde 100644
--- a/jstests/replsets/apply_ops_idempotency.js
+++ b/jstests/replsets/apply_ops_idempotency.js
@@ -1,199 +1,199 @@
(function() {
- 'use strict';
- const debug = 0;
-
- let rst = new ReplSetTest({name: "applyOpsIdempotency", nodes: 1});
- rst.startSet();
- rst.initiate();
-
- /**
- * Returns true if this database contains any drop-pending collections.
- */
- function containsDropPendingCollection(mydb) {
- const res =
- assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: true}));
- const collectionInfos = res.cursor.firstBatch;
- const collectionNames = collectionInfos.map(c => c.name);
- return Boolean(collectionNames.find(c => c.indexOf('system.drop.') == 0));
- }
-
- /**
- * Apply ops on mydb, asserting success.
- */
- function assertApplyOpsWorks(testdbs, ops) {
- // Remaining operations in ops must still be applied
- while (ops.length) {
- let cmd = {applyOps: ops};
- let res = testdbs[0].adminCommand(cmd);
- if (debug) {
- printjson({applyOps: ops, res});
- }
-
- // Wait for any drop-pending collections to be removed by the reaper before proceeding.
- assert.soon(function() {
- return !testdbs.find(mydb => containsDropPendingCollection(mydb));
- });
-
- // If the entire operation succeeded, we're done.
- if (res.ok == 1)
- return res;
-
- // Skip any operations that succeeded.
- while (res.applied-- && res.results.shift())
- ops.shift();
-
- // These errors are expected when replaying operations and should be ignored.
- if (res.code == ErrorCodes.NamespaceNotFound || res.code == ErrorCodes.DuplicateKey) {
- ops.shift();
- continue;
- }
-
- // Generate the appropriate error message.
- assert.commandWorked(res, tojson(cmd));
+'use strict';
+const debug = 0;
+
+let rst = new ReplSetTest({name: "applyOpsIdempotency", nodes: 1});
+rst.startSet();
+rst.initiate();
+
+/**
+ * Returns true if this database contains any drop-pending collections.
+ */
+function containsDropPendingCollection(mydb) {
+ const res =
+ assert.commandWorked(mydb.runCommand("listCollections", {includePendingDrops: true}));
+ const collectionInfos = res.cursor.firstBatch;
+ const collectionNames = collectionInfos.map(c => c.name);
+ return Boolean(collectionNames.find(c => c.indexOf('system.drop.') == 0));
+}
+
+/**
+ * Apply ops on mydb, asserting success.
+ */
+function assertApplyOpsWorks(testdbs, ops) {
+    // Loop until every operation in 'ops' has been applied (or skipped as an expected error).
+ while (ops.length) {
+ let cmd = {applyOps: ops};
+ let res = testdbs[0].adminCommand(cmd);
+ if (debug) {
+ printjson({applyOps: ops, res});
}
- }
- /**
- * Run the dbHash command on mydb, assert it worked and return the md5.
- */
- function dbHash(mydb) {
- let cmd = {dbHash: 1};
- let res = mydb.runCommand(cmd);
- assert.commandWorked(res, tojson(cmd));
- return res.md5;
- }
-
- /**
- * Gather collection info and dbHash results of each of the passed databases.
- */
- function dbInfo(dbs) {
- return dbs.map((db) => {
- return {name: db.getName(), info: db.getCollectionInfos(), md5: dbHash(db)};
+ // Wait for any drop-pending collections to be removed by the reaper before proceeding.
+ assert.soon(function() {
+ return !testdbs.find(mydb => containsDropPendingCollection(mydb));
});
- }
- var getCollections = (mydb, prefixes) => prefixes.map((prefix) => mydb[prefix]);
-
- /**
- * Test functions to run and test using replay of oplog.
- */
- var tests = {
- crud: (mydb) => {
- let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']);
- assert.writeOK(x.insert({_id: 1}));
- assert.writeOK(x.update({_id: 1}, {$set: {x: 1}}));
- assert.writeOK(x.remove({_id: 1}));
-
- assert.writeOK(y.update({_id: 1}, {y: 1}));
- assert.writeOK(y.insert({_id: 2, y: false, z: false}));
- assert.writeOK(y.update({_id: 2}, {y: 2}));
-
- assert.writeOK(z.insert({_id: 1, z: 1}));
- assert.writeOK(z.remove({_id: 1}));
- assert.writeOK(z.insert({_id: 1}));
- assert.writeOK(z.insert({_id: 2, z: 2}));
- },
- renameCollectionWithinDatabase: (mydb) => {
- let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']);
- assert.writeOK(x.insert({_id: 1, x: 1}));
- assert.writeOK(y.insert({_id: 1, y: 1}));
-
- assert.commandWorked(x.renameCollection(z.getName()));
- assert.writeOK(z.insert({_id: 2, x: 2}));
- assert.writeOK(x.insert({_id: 2, x: false}));
- assert.writeOK(y.insert({y: 2}));
-
- assert.commandWorked(y.renameCollection(x.getName(), true));
- assert.commandWorked(z.renameCollection(y.getName()));
- },
- renameCollectionWithinDatabaseDroppingTargetByUUID: (mydb) => {
- assert.commandWorked(mydb.createCollection("x"));
- assert.commandWorked(mydb.createCollection("y"));
- assert.commandWorked(mydb.createCollection("z"));
-
- assert.commandWorked(mydb.x.renameCollection('xx'));
- // When replayed on a up-to-date db, this oplog entry may drop
- // collection z rather than collection x if the dropTarget is not
- // specified by UUID. (See SERVER-33087)
- assert.commandWorked(mydb.y.renameCollection('xx', true));
- assert.commandWorked(mydb.xx.renameCollection('yy'));
- assert.commandWorked(mydb.z.renameCollection('xx'));
- },
- renameCollectionWithinDatabaseDropTargetEvenWhenSourceIsEmpty: (mydb) => {
- assert.commandWorked(mydb.createCollection("x"));
- assert.commandWorked(mydb.createCollection("y"));
- assert.commandWorked(mydb.x.renameCollection('y', true));
- assert(mydb.y.drop());
- },
- renameCollectionAcrossDatabases: (mydb) => {
- let otherdb = mydb.getSiblingDB(mydb + '_');
- let [x, y] = getCollections(mydb, ['x', 'y']);
- let [z] = getCollections(otherdb, ['z']);
- assert.writeOK(x.insert({_id: 1, x: 1}));
- assert.writeOK(y.insert({_id: 1, y: 1}));
-
- assert.commandWorked(
- mydb.adminCommand({renameCollection: x.getFullName(), to: z.getFullName()}));
- assert.writeOK(z.insert({_id: 2, x: 2}));
- assert.writeOK(x.insert({_id: 2, x: false}));
- assert.writeOK(y.insert({y: 2}));
-
- assert.commandWorked(mydb.adminCommand(
- {renameCollection: y.getFullName(), to: x.getFullName(), dropTarget: true}));
- assert.commandWorked(
- mydb.adminCommand({renameCollection: z.getFullName(), to: y.getFullName()}));
- return [mydb, otherdb];
- },
- createIndex: (mydb) => {
- let [x, y] = getCollections(mydb, ['x', 'y']);
- assert.commandWorked(x.createIndex({x: 1}));
- assert.writeOK(x.insert({_id: 1, x: 1}));
- assert.writeOK(y.insert({_id: 1, y: 1}));
- assert.commandWorked(y.createIndex({y: 1}));
- assert.writeOK(y.insert({_id: 2, y: 2}));
- },
- };
-
- /**
- * Create a new uniquely named database, execute testFun and compute the dbHash. Then replay
- * all different suffixes of the oplog and check for the correct hash. If testFun creates
- * additional databases, it should return an array with all databases to check.
- */
- function testIdempotency(primary, testFun, testName) {
- // Create a new database name, so it's easier to filter out our oplog records later.
- let dbname = (new Date()).toISOString().match(/[-0-9T]/g).join(''); // 2017-05-30T155055713
- let mydb = primary.getDB(dbname);
-
- // Allow testFun to return the array of databases to check (default is mydb).
- let testdbs = testFun(mydb) || [mydb];
- let expectedInfo = dbInfo(testdbs);
-
- let oplog = mydb.getSiblingDB('local').oplog.rs;
- let ops = oplog
- .find({op: {$ne: 'n'}, ns: new RegExp('^' + mydb.getName())},
- {ts: 0, t: 0, h: 0, v: 0})
- .toArray();
- assert.gt(ops.length, 0, 'Could not find any matching ops in the oplog');
- testdbs.forEach((db) => assert.commandWorked(db.dropDatabase()));
+ // If the entire operation succeeded, we're done.
+ if (res.ok == 1)
+ return res;
- if (debug) {
- print(testName + ': replaying suffixes of ' + ops.length + ' operations');
- printjson(ops);
- }
+ // Skip any operations that succeeded.
+ while (res.applied-- && res.results.shift())
+ ops.shift();
- for (let j = 0; j < ops.length; j++) {
- let replayOps = ops.slice(j);
- assertApplyOpsWorks(testdbs, replayOps);
- let actualInfo = dbInfo(testdbs);
- assert.eq(actualInfo,
- expectedInfo,
- 'unexpected differences between databases after replaying final ' +
- replayOps.length + ' ops in test ' + testName + ": " + tojson(replayOps));
+ // These errors are expected when replaying operations and should be ignored.
+ if (res.code == ErrorCodes.NamespaceNotFound || res.code == ErrorCodes.DuplicateKey) {
+ ops.shift();
+ continue;
}
+
+ // Generate the appropriate error message.
+ assert.commandWorked(res, tojson(cmd));
+ }
+}
+
+/**
+ * Run the dbHash command on mydb, assert it worked and return the md5.
+ */
+function dbHash(mydb) {
+ let cmd = {dbHash: 1};
+ let res = mydb.runCommand(cmd);
+ assert.commandWorked(res, tojson(cmd));
+ return res.md5;
+}
+
+/**
+ * Gather collection info and dbHash results of each of the passed databases.
+ */
+function dbInfo(dbs) {
+ return dbs.map((db) => {
+ return {name: db.getName(), info: db.getCollectionInfos(), md5: dbHash(db)};
+ });
+}
+
+var getCollections = (mydb, prefixes) => prefixes.map((prefix) => mydb[prefix]);
+
+/**
+ * Test functions to run and test using replay of oplog.
+ */
+var tests = {
+ crud: (mydb) => {
+ let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']);
+ assert.writeOK(x.insert({_id: 1}));
+ assert.writeOK(x.update({_id: 1}, {$set: {x: 1}}));
+ assert.writeOK(x.remove({_id: 1}));
+
+ assert.writeOK(y.update({_id: 1}, {y: 1}));
+ assert.writeOK(y.insert({_id: 2, y: false, z: false}));
+ assert.writeOK(y.update({_id: 2}, {y: 2}));
+
+ assert.writeOK(z.insert({_id: 1, z: 1}));
+ assert.writeOK(z.remove({_id: 1}));
+ assert.writeOK(z.insert({_id: 1}));
+ assert.writeOK(z.insert({_id: 2, z: 2}));
+ },
+ renameCollectionWithinDatabase: (mydb) => {
+ let [x, y, z] = getCollections(mydb, ['x', 'y', 'z']);
+ assert.writeOK(x.insert({_id: 1, x: 1}));
+ assert.writeOK(y.insert({_id: 1, y: 1}));
+
+ assert.commandWorked(x.renameCollection(z.getName()));
+ assert.writeOK(z.insert({_id: 2, x: 2}));
+ assert.writeOK(x.insert({_id: 2, x: false}));
+ assert.writeOK(y.insert({y: 2}));
+
+ assert.commandWorked(y.renameCollection(x.getName(), true));
+ assert.commandWorked(z.renameCollection(y.getName()));
+ },
+ renameCollectionWithinDatabaseDroppingTargetByUUID: (mydb) => {
+ assert.commandWorked(mydb.createCollection("x"));
+ assert.commandWorked(mydb.createCollection("y"));
+ assert.commandWorked(mydb.createCollection("z"));
+
+ assert.commandWorked(mydb.x.renameCollection('xx'));
+        // When replayed on an up-to-date db, this oplog entry may drop
+ // collection z rather than collection x if the dropTarget is not
+ // specified by UUID. (See SERVER-33087)
+ assert.commandWorked(mydb.y.renameCollection('xx', true));
+ assert.commandWorked(mydb.xx.renameCollection('yy'));
+ assert.commandWorked(mydb.z.renameCollection('xx'));
+ },
+ renameCollectionWithinDatabaseDropTargetEvenWhenSourceIsEmpty: (mydb) => {
+ assert.commandWorked(mydb.createCollection("x"));
+ assert.commandWorked(mydb.createCollection("y"));
+ assert.commandWorked(mydb.x.renameCollection('y', true));
+ assert(mydb.y.drop());
+ },
+ renameCollectionAcrossDatabases: (mydb) => {
+ let otherdb = mydb.getSiblingDB(mydb + '_');
+ let [x, y] = getCollections(mydb, ['x', 'y']);
+ let [z] = getCollections(otherdb, ['z']);
+ assert.writeOK(x.insert({_id: 1, x: 1}));
+ assert.writeOK(y.insert({_id: 1, y: 1}));
+
+ assert.commandWorked(
+ mydb.adminCommand({renameCollection: x.getFullName(), to: z.getFullName()}));
+ assert.writeOK(z.insert({_id: 2, x: 2}));
+ assert.writeOK(x.insert({_id: 2, x: false}));
+ assert.writeOK(y.insert({y: 2}));
+
+ assert.commandWorked(mydb.adminCommand(
+ {renameCollection: y.getFullName(), to: x.getFullName(), dropTarget: true}));
+ assert.commandWorked(
+ mydb.adminCommand({renameCollection: z.getFullName(), to: y.getFullName()}));
+ return [mydb, otherdb];
+ },
+ createIndex: (mydb) => {
+ let [x, y] = getCollections(mydb, ['x', 'y']);
+ assert.commandWorked(x.createIndex({x: 1}));
+ assert.writeOK(x.insert({_id: 1, x: 1}));
+ assert.writeOK(y.insert({_id: 1, y: 1}));
+ assert.commandWorked(y.createIndex({y: 1}));
+ assert.writeOK(y.insert({_id: 2, y: 2}));
+ },
+};
+
+/**
+ * Create a new uniquely named database, execute testFun and compute the dbHash. Then replay
+ * all different suffixes of the oplog and check for the correct hash. If testFun creates
+ * additional databases, it should return an array with all databases to check.
+ */
+function testIdempotency(primary, testFun, testName) {
+ // Create a new database name, so it's easier to filter out our oplog records later.
+ let dbname = (new Date()).toISOString().match(/[-0-9T]/g).join(''); // 2017-05-30T155055713
+ let mydb = primary.getDB(dbname);
+
+ // Allow testFun to return the array of databases to check (default is mydb).
+ let testdbs = testFun(mydb) || [mydb];
+ let expectedInfo = dbInfo(testdbs);
+
+ let oplog = mydb.getSiblingDB('local').oplog.rs;
+ let ops =
+ oplog
+ .find({op: {$ne: 'n'}, ns: new RegExp('^' + mydb.getName())}, {ts: 0, t: 0, h: 0, v: 0})
+ .toArray();
+ assert.gt(ops.length, 0, 'Could not find any matching ops in the oplog');
+ testdbs.forEach((db) => assert.commandWorked(db.dropDatabase()));
+
+ if (debug) {
+ print(testName + ': replaying suffixes of ' + ops.length + ' operations');
+ printjson(ops);
+ }
+
+ for (let j = 0; j < ops.length; j++) {
+ let replayOps = ops.slice(j);
+ assertApplyOpsWorks(testdbs, replayOps);
+ let actualInfo = dbInfo(testdbs);
+ assert.eq(actualInfo,
+ expectedInfo,
+ 'unexpected differences between databases after replaying final ' +
+ replayOps.length + ' ops in test ' + testName + ": " + tojson(replayOps));
}
+}
- for (let f in tests)
- testIdempotency(rst.getPrimary(), tests[f], f);
+for (let f in tests)
+ testIdempotency(rst.getPrimary(), tests[f], f);
- rst.stopSet();
+rst.stopSet();
})();
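
testIdempotency's core invariant is that replaying any suffix of already-applied ops leaves the data bit-for-bit identical, which it checks through the dbHash md5 values gathered by dbInfo. That comparison in isolation, reshaped as a two-handle helper for illustration; the helper name and database handles are assumptions:

    // Idempotency check: the same logical data must hash identically no
    // matter how many times the trailing ops were replayed.
    function assertSameDbHash(dbA, dbB) {
        const md5A = assert.commandWorked(dbA.runCommand({dbHash: 1})).md5;
        const md5B = assert.commandWorked(dbB.runCommand({dbHash: 1})).md5;
        assert.eq(md5A, md5B, "dbHash mismatch between " + dbA + " and " + dbB);
    }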
diff --git a/jstests/replsets/apply_ops_insert_write_conflict_atomic.js b/jstests/replsets/apply_ops_insert_write_conflict_atomic.js
index 7f8870bc75a..47d35d4e7df 100644
--- a/jstests/replsets/apply_ops_insert_write_conflict_atomic.js
+++ b/jstests/replsets/apply_ops_insert_write_conflict_atomic.js
@@ -1,10 +1,10 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/apply_ops_insert_write_conflict.js");
+load("jstests/replsets/libs/apply_ops_insert_write_conflict.js");
- new ApplyOpsInsertWriteConflictTest({
- testName: 'apply_ops_insert_write_conflict_atomic',
- atomic: true
- }).run();
+new ApplyOpsInsertWriteConflictTest({
+ testName: 'apply_ops_insert_write_conflict_atomic',
+ atomic: true
+}).run();
}());
diff --git a/jstests/replsets/apply_ops_insert_write_conflict_nonatomic.js b/jstests/replsets/apply_ops_insert_write_conflict_nonatomic.js
index 2e91de8637a..a7843716bac 100644
--- a/jstests/replsets/apply_ops_insert_write_conflict_nonatomic.js
+++ b/jstests/replsets/apply_ops_insert_write_conflict_nonatomic.js
@@ -1,10 +1,10 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/apply_ops_insert_write_conflict.js");
+load("jstests/replsets/libs/apply_ops_insert_write_conflict.js");
- new ApplyOpsInsertWriteConflictTest({
- testName: 'apply_ops_insert_write_conflict_nonatomic',
- atomic: false
- }).run();
+new ApplyOpsInsertWriteConflictTest({
+ testName: 'apply_ops_insert_write_conflict_nonatomic',
+ atomic: false
+}).run();
}());
diff --git a/jstests/replsets/apply_ops_lastop.js b/jstests/replsets/apply_ops_lastop.js
index a2d45ab459f..e1c9fdb1823 100644
--- a/jstests/replsets/apply_ops_lastop.js
+++ b/jstests/replsets/apply_ops_lastop.js
@@ -4,54 +4,49 @@
//
(function() {
- "use strict";
-
- var rs = new ReplSetTest({name: "applyOpsOptimeTest", nodes: 3, waitForKeys: true});
- rs.startSet();
- var nodes = rs.nodeList();
- rs.initiate({
- "_id": "applyOpsOptimeTest",
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
- var primary = rs.getPrimary();
- var db = primary.getDB('foo');
- var coll = primary.getCollection('foo.bar');
- // Two connections
- var m1 = new Mongo(primary.host);
- var m2 = new Mongo(primary.host);
-
- var insertApplyOps = [{op: "i", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
- var deleteApplyOps = [{op: "d", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
- var badPreCondition = [{ns: 'foo.bar', q: {_id: 10, a: "aaa"}, res: {a: "aaa"}}];
- var majorityWriteConcern = {w: 'majority', wtimeout: 30000};
-
- // Set up some data
- assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works
- assert.commandWorked(
- m1.getDB('foo').runCommand({applyOps: insertApplyOps, writeConcern: majorityWriteConcern}));
- var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
-
- // No-op applyOps
- var res = m2.getDB('foo').runCommand({
- applyOps: deleteApplyOps,
- preCondition: badPreCondition,
- writeConcern: majorityWriteConcern
- });
- assert.commandFailed(res, "The applyOps command was expected to fail, but instead succeeded.");
- assert.eq(
- res.errmsg, "preCondition failed", "The applyOps command failed for the wrong reason.");
- var noOp = m2.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
-
- // Check that each connection has the same last optime
- assert.eq(noOp,
- insertOp,
- "The connections' last optimes do " +
- "not match: applyOps failed to update lastop on no-op");
-
- rs.stopSet();
-
+"use strict";
+
+var rs = new ReplSetTest({name: "applyOpsOptimeTest", nodes: 3, waitForKeys: true});
+rs.startSet();
+var nodes = rs.nodeList();
+rs.initiate({
+ "_id": "applyOpsOptimeTest",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
+var primary = rs.getPrimary();
+var db = primary.getDB('foo');
+var coll = primary.getCollection('foo.bar');
+// Two connections
+var m1 = new Mongo(primary.host);
+var m2 = new Mongo(primary.host);
+
+var insertApplyOps = [{op: "i", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
+var deleteApplyOps = [{op: "d", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
+var badPreCondition = [{ns: 'foo.bar', q: {_id: 10, a: "aaa"}, res: {a: "aaa"}}];
+var majorityWriteConcern = {w: 'majority', wtimeout: 30000};
+
+// Set up some data
+assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works
+assert.commandWorked(
+ m1.getDB('foo').runCommand({applyOps: insertApplyOps, writeConcern: majorityWriteConcern}));
+var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
+
+// No-op applyOps
+var res = m2.getDB('foo').runCommand(
+ {applyOps: deleteApplyOps, preCondition: badPreCondition, writeConcern: majorityWriteConcern});
+assert.commandFailed(res, "The applyOps command was expected to fail, but instead succeeded.");
+assert.eq(res.errmsg, "preCondition failed", "The applyOps command failed for the wrong reason.");
+var noOp = m2.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
+
+// Check that each connection has the same last optime
+assert.eq(noOp,
+ insertOp,
+ "The connections' last optimes do " +
+ "not match: applyOps failed to update lastop on no-op");
+
+rs.stopSet();
})();
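
The test reads each connection's last optime through the shell's getLastErrorObj wrapper: even a preCondition-failed applyOps is expected to leave the connection's lastOp at the system's latest optime, which is why the two values must match. The lookup on its own, assuming `conn` is an open connection:

    // lastOp is the optime associated with the last operation observed on
    // this connection, here fetched with w:'majority' and a 30s timeout.
    const gle = conn.getDB("foo").getLastErrorObj("majority", 30000);
    printjson(gle.lastOp);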
diff --git a/jstests/replsets/apply_ops_wc.js b/jstests/replsets/apply_ops_wc.js
index 5e7747fe343..8b3b6d4e92f 100644
--- a/jstests/replsets/apply_ops_wc.js
+++ b/jstests/replsets/apply_ops_wc.js
@@ -10,130 +10,127 @@
*/
(function() {
- "use strict";
- var nodeCount = 3;
- var replTest = new ReplSetTest({name: 'applyOpsWCSet', nodes: nodeCount});
- replTest.startSet();
- var cfg = replTest.getReplSetConfig();
- cfg.settings = {};
- cfg.settings.chainingAllowed = false;
- replTest.initiate(cfg);
-
- var testDB = "applyOps-wc-test";
-
- // Get test collection.
- var master = replTest.getPrimary();
- var db = master.getDB(testDB);
- var coll = db.apply_ops_wc;
-
- function dropTestCollection() {
- coll.drop();
- assert.eq(0, coll.find().itcount(), "test collection not empty");
- }
+"use strict";
+var nodeCount = 3;
+var replTest = new ReplSetTest({name: 'applyOpsWCSet', nodes: nodeCount});
+replTest.startSet();
+var cfg = replTest.getReplSetConfig();
+cfg.settings = {};
+cfg.settings.chainingAllowed = false;
+replTest.initiate(cfg);
+
+var testDB = "applyOps-wc-test";
+
+// Get test collection.
+var master = replTest.getPrimary();
+var db = master.getDB(testDB);
+var coll = db.apply_ops_wc;
+
+function dropTestCollection() {
+ coll.drop();
+ assert.eq(0, coll.find().itcount(), "test collection not empty");
+}
+
+dropTestCollection();
+
+// Set up the applyOps command.
+var applyOpsReq = {
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "b"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "c"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "d"}},
+ ]
+};
+
+function assertApplyOpsCommandWorked(res) {
+ assert.eq(3, res.applied);
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq([true, true, true], res.results);
+}
+
+function assertWriteConcernError(res) {
+ assert(res.writeConcernError);
+ assert(res.writeConcernError.code);
+ assert(res.writeConcernError.errmsg);
+}
+
+var invalidWriteConcerns = [{w: 'invalid'}, {w: nodeCount + 1}];
+
+function testInvalidWriteConcern(wc) {
+ jsTest.log("Testing invalid write concern " + tojson(wc));
+
+ applyOpsReq.writeConcern = wc;
+ var res = coll.runCommand(applyOpsReq);
+ assertApplyOpsCommandWorked(res);
+ assertWriteConcernError(res);
+}
+
+// Verify that invalid write concerns yield an error.
+coll.insert({_id: 1, x: "a"});
+invalidWriteConcerns.forEach(testInvalidWriteConcern);
+
+var secondaries = replTest.getSecondaries();
+
+var majorityWriteConcerns = [
+ {w: 2, wtimeout: 30000},
+ {w: 'majority', wtimeout: 30000},
+];
+
+function testMajorityWriteConcerns(wc) {
+ jsTest.log("Testing " + tojson(wc));
+
+ // Reset secondaries to ensure they can replicate.
+ secondaries[0].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+ secondaries[1].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+
+ // Set the writeConcern of the applyOps command.
+ applyOpsReq.writeConcern = wc;
dropTestCollection();
- // Set up the applyOps command.
- var applyOpsReq = {
- applyOps: [
- {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "b"}},
- {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "c"}},
- {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "d"}},
- ]
- };
-
- function assertApplyOpsCommandWorked(res) {
- assert.eq(3, res.applied);
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq([true, true, true], res.results);
- }
-
- function assertWriteConcernError(res) {
- assert(res.writeConcernError);
- assert(res.writeConcernError.code);
- assert(res.writeConcernError.errmsg);
- }
-
- var invalidWriteConcerns = [{w: 'invalid'}, {w: nodeCount + 1}];
-
- function testInvalidWriteConcern(wc) {
- jsTest.log("Testing invalid write concern " + tojson(wc));
-
- applyOpsReq.writeConcern = wc;
- var res = coll.runCommand(applyOpsReq);
- assertApplyOpsCommandWorked(res);
- assertWriteConcernError(res);
- }
-
- // Verify that invalid write concerns yield an error.
+ // applyOps with a full replica set should succeed.
coll.insert({_id: 1, x: "a"});
- invalidWriteConcerns.forEach(testInvalidWriteConcern);
-
- var secondaries = replTest.getSecondaries();
-
- var majorityWriteConcerns = [
- {w: 2, wtimeout: 30000},
- {w: 'majority', wtimeout: 30000},
- ];
-
- function testMajorityWriteConcerns(wc) {
- jsTest.log("Testing " + tojson(wc));
-
- // Reset secondaries to ensure they can replicate.
- secondaries[0].getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
- secondaries[1].getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
-
- // Set the writeConcern of the applyOps command.
- applyOpsReq.writeConcern = wc;
+ var res = db.runCommand(applyOpsReq);
- dropTestCollection();
+ assertApplyOpsCommandWorked(res);
+ assert(!res.writeConcernError,
+       'applyOps on a full replica set had writeConcern error ' + tojson(res.writeConcernError));
- // applyOps with a full replica set should succeed.
- coll.insert({_id: 1, x: "a"});
- var res = db.runCommand(applyOpsReq);
-
- assertApplyOpsCommandWorked(res);
- assert(!res.writeConcernError,
- 'applyOps on a full replicaset had writeConcern error ' +
- tojson(res.writeConcernError));
-
- dropTestCollection();
+ dropTestCollection();
- // Stop replication at one secondary.
- secondaries[0].getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+ // Stop replication at one secondary.
+ secondaries[0].getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
- // applyOps should succeed with only 1 node not replicating.
- coll.insert({_id: 1, x: "a"});
- res = db.runCommand(applyOpsReq);
+ // applyOps should succeed with only 1 node not replicating.
+ coll.insert({_id: 1, x: "a"});
+ res = db.runCommand(applyOpsReq);
- assertApplyOpsCommandWorked(res);
- assert(!res.writeConcernError,
- 'applyOps on a replicaset with 2 working nodes had writeConcern error ' +
- tojson(res.writeConcernError));
+ assertApplyOpsCommandWorked(res);
+ assert(!res.writeConcernError,
+ 'applyOps on a replicaset with 2 working nodes had writeConcern error ' +
+ tojson(res.writeConcernError));
- dropTestCollection();
+ dropTestCollection();
- // Stop replication at a second secondary.
- secondaries[1].getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+ // Stop replication at a second secondary.
+ secondaries[1].getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
- // applyOps should fail after two nodes have stopped replicating.
- coll.insert({_id: 1, x: "a"});
- applyOpsReq.writeConcern.wtimeout = 5000;
- res = db.runCommand(applyOpsReq);
+ // applyOps should fail after two nodes have stopped replicating.
+ coll.insert({_id: 1, x: "a"});
+ applyOpsReq.writeConcern.wtimeout = 5000;
+ res = db.runCommand(applyOpsReq);
- assertApplyOpsCommandWorked(res);
- assertWriteConcernError(res);
- }
+ assertApplyOpsCommandWorked(res);
+ assertWriteConcernError(res);
+}
- majorityWriteConcerns.forEach(testMajorityWriteConcerns);
+majorityWriteConcerns.forEach(testMajorityWriteConcerns);
- // Allow clean shutdown
- secondaries[0].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
- secondaries[1].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+// Allow clean shutdown
+secondaries[0].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+secondaries[1].getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
- replTest.stopSet();
+replTest.stopSet();
})();
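
testMajorityWriteConcerns above drives the whole scenario with the rsSyncApplyStop failpoint: while it is on, a secondary stops applying oplog entries, so majority write concerns cannot be satisfied and the command reports a writeConcernError alongside otherwise-successful results. The toggle on its own, assuming `secondary` is a connection to a secondary node:

    // Pause oplog application on the secondary; w:'majority' writes from the
    // primary will now block until wtimeout and report a writeConcernError.
    assert.commandWorked(secondary.getDB("admin").runCommand(
        {configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));

    // ... issue writes that are expected to time out ...

    // Resume replication so the set can catch up and shut down cleanly.
    assert.commandWorked(secondary.getDB("admin").runCommand(
        {configureFailPoint: "rsSyncApplyStop", mode: "off"}));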
diff --git a/jstests/replsets/apply_transaction_with_yield.js b/jstests/replsets/apply_transaction_with_yield.js
index 19042ebcd88..67bff9b8dd3 100644
--- a/jstests/replsets/apply_transaction_with_yield.js
+++ b/jstests/replsets/apply_transaction_with_yield.js
@@ -7,38 +7,38 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- const name = "change_stream_speculative_majority";
- const replTest = new ReplSetTest({name: name, nodes: [{}, {rsConfig: {priority: 0}}]});
- replTest.startSet();
- replTest.initiate();
+const name = "change_stream_speculative_majority";
+const replTest = new ReplSetTest({name: name, nodes: [{}, {rsConfig: {priority: 0}}]});
+replTest.startSet();
+replTest.initiate();
- const dbName = name;
- const collName = "coll";
+const dbName = name;
+const collName = "coll";
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
- // Collections used in a transaction should be explicitly created first.
- assert.commandWorked(primary.getDB(dbName).createCollection(collName));
+// Collections used in a transaction should be explicitly created first.
+assert.commandWorked(primary.getDB(dbName).createCollection(collName));
- // Force the secondary to yield at ever opportunity.
- assert.commandWorked(
- secondary.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
+// Force the secondary to yield at every opportunity.
+assert.commandWorked(
+ secondary.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
- // Create a transaction that is substantially larger than 16MB, forcing the secondary to apply
- // it in multiple batches, so that it uses the TransactionHistoryIterator.
- const session = primary.startSession();
- session.startTransaction({readConcern: {level: "majority"}});
- const sessionColl = session.getDatabase(dbName)[collName];
- for (let i = 0; i < 3; i = i + 1) {
- assert.commandWorked(sessionColl.insert({a: 'x'.repeat(15 * 1024 * 1024)}));
- }
- session.commitTransaction();
+// Create a transaction that is substantially larger than 16MB, forcing the secondary to apply
+// it in multiple batches, so that it uses the TransactionHistoryIterator.
+const session = primary.startSession();
+session.startTransaction({readConcern: {level: "majority"}});
+const sessionColl = session.getDatabase(dbName)[collName];
+for (let i = 0; i < 3; i = i + 1) {
+ assert.commandWorked(sessionColl.insert({a: 'x'.repeat(15 * 1024 * 1024)}));
+}
+session.commitTransaction();
- // Make sure the transaction has been fully applied.
- replTest.awaitReplication();
+// Make sure the transaction has been fully applied.
+replTest.awaitReplication();
- replTest.stopSet();
+replTest.stopSet();
})();
\ No newline at end of file
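
The setParameter above is what makes this test exercise yielding: internalQueryExecYieldIterations controls how many iterations a plan runs between yield checks, and setting it to 1 forces a yield opportunity after every document (the default is far larger, so yields are otherwise rare in short tests). The knob in isolation, assuming `node` is the connection to tune:

    // Force a yield check after every iteration of query execution.
    assert.commandWorked(
        node.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));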
diff --git a/jstests/replsets/arbiters_not_included_in_w2_wc.js b/jstests/replsets/arbiters_not_included_in_w2_wc.js
index 6ea19cc55a5..ca391185e30 100644
--- a/jstests/replsets/arbiters_not_included_in_w2_wc.js
+++ b/jstests/replsets/arbiters_not_included_in_w2_wc.js
@@ -10,44 +10,43 @@
*/
(function() {
- "use strict";
+"use strict";
- const name = "arbiters_not_included_in_w2_wc";
- const rst = new ReplSetTest({name: name, nodes: 5});
- const nodes = rst.nodeList();
+const name = "arbiters_not_included_in_w2_wc";
+const rst = new ReplSetTest({name: name, nodes: 5});
+const nodes = rst.nodeList();
- rst.startSet();
- rst.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], priority: 0, votes: 0},
- {"_id": 2, "host": nodes[2], priority: 0, votes: 0},
- {"_id": 3, "host": nodes[3], "arbiterOnly": true},
- {"_id": 4, "host": nodes[4], "arbiterOnly": true}
- ]
- });
+rst.startSet();
+rst.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0, votes: 0},
+ {"_id": 2, "host": nodes[2], priority: 0, votes: 0},
+ {"_id": 3, "host": nodes[3], "arbiterOnly": true},
+ {"_id": 4, "host": nodes[4], "arbiterOnly": true}
+ ]
+});
- const dbName = "test";
- const collName = name;
+const dbName = "test";
+const collName = name;
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(
- testColl.insert({"a": 1}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+assert.commandWorked(
+ testColl.insert({"a": 1}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- jsTestLog("Shutting down both secondaries");
+jsTestLog("Shutting down both secondaries");
- rst.stop(1);
- rst.stop(2);
+rst.stop(1);
+rst.stop(2);
- jsTestLog("Issuing a w:2 write and confirming that it times out");
+jsTestLog("Issuing a w:2 write and confirming that it times out");
- assert.commandFailedWithCode(
- testColl.insert({"b": 2}, {writeConcern: {w: 2, wtimeout: 5 * 1000}}),
- ErrorCodes.WriteConcernFailed);
+assert.commandFailedWithCode(testColl.insert({"b": 2}, {writeConcern: {w: 2, wtimeout: 5 * 1000}}),
+ ErrorCodes.WriteConcernFailed);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/arbiters_not_included_in_w3_wc.js b/jstests/replsets/arbiters_not_included_in_w3_wc.js
index 8e6cbf360f7..aaf35cb4501 100644
--- a/jstests/replsets/arbiters_not_included_in_w3_wc.js
+++ b/jstests/replsets/arbiters_not_included_in_w3_wc.js
@@ -10,42 +10,41 @@
*/
(function() {
- "use strict";
+"use strict";
- const name = "arbiters_not_included_in_w3_wc";
- const rst = new ReplSetTest({name: name, nodes: 4});
- const nodes = rst.nodeList();
+const name = "arbiters_not_included_in_w3_wc";
+const rst = new ReplSetTest({name: name, nodes: 4});
+const nodes = rst.nodeList();
- rst.startSet();
- rst.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], priority: 0},
- {"_id": 2, "host": nodes[2], priority: 0, votes: 0},
- {"_id": 3, "host": nodes[3], "arbiterOnly": true}
- ]
- });
+rst.startSet();
+rst.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], priority: 0, votes: 0},
+ {"_id": 3, "host": nodes[3], "arbiterOnly": true}
+ ]
+});
- const dbName = "test";
- const collName = name;
+const dbName = "test";
+const collName = name;
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(
- testColl.insert({"a": 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+assert.commandWorked(
+ testColl.insert({"a": 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- jsTestLog("Shutting down the non-voting secondary");
+jsTestLog("Shutting down the non-voting secondary");
- rst.stop(2);
+rst.stop(2);
- jsTestLog("Issuing a w:3 write and confirming that it times out");
+jsTestLog("Issuing a w:3 write and confirming that it times out");
- assert.commandFailedWithCode(
- testColl.insert({"b": 2}, {writeConcern: {w: 3, wtimeout: 5 * 1000}}),
- ErrorCodes.WriteConcernFailed);
+assert.commandFailedWithCode(testColl.insert({"b": 2}, {writeConcern: {w: 3, wtimeout: 5 * 1000}}),
+ ErrorCodes.WriteConcernFailed);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
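
Both arbiter tests rest on the same fact: arbiters vote in elections but hold no data, so they can never acknowledge a write and never count toward w:N. One way to see the bound directly is to count the data-bearing members in the config; a sketch, assuming `rst` is the running ReplSetTest:

    // Arbiters appear in the config with arbiterOnly: true; only the other
    // members can acknowledge writes, which caps the satisfiable numeric w.
    const cfg = rst.getReplSetConfigFromNode();
    const dataBearing = cfg.members.filter(m => !m.arbiterOnly).length;
    print("max satisfiable w: " + dataBearing);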
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
index 9a9f36939b2..42dc2638c28 100644
--- a/jstests/replsets/auth1.js
+++ b/jstests/replsets/auth1.js
@@ -6,212 +6,201 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- var name = "rs_auth1";
- var port = allocatePorts(5);
- var path = "jstests/libs/";
-
- // These keyFiles have their permissions set to 600 later in the test.
- var key1_600 = path + "key1";
- var key2_600 = path + "key2";
-
- // This keyFile has its permissions set to 644 later in the test.
- var key1_644 = path + "key1_644";
-
- print("try starting mongod with auth");
- var m = MongoRunner.runMongod(
- {auth: "", port: port[4], dbpath: MongoRunner.dataDir + "/wrong-auth"});
-
- assert.eq(m.getDB("local").auth("__system", ""), 0);
-
- MongoRunner.stopMongod(m);
-
- print("reset permissions");
- run("chmod", "644", key1_644);
-
- print("try starting mongod");
- m = runMongoProgram("mongod",
- "--keyFile",
- key1_644,
- "--port",
- port[0],
- "--dbpath",
- MongoRunner.dataPath + name);
-
- print("should fail with wrong permissions");
- assert.eq(
- m, _isWindows() ? 100 : 1, "mongod should exit w/ 1 (EXIT_FAILURE): permissions too open");
-
- // Pre-populate the data directory for the first replica set node, to be started later, with
- // a user's credentials.
- print("add a user to server0: foo");
- m = MongoRunner.runMongod({dbpath: MongoRunner.dataPath + name + "-0"});
- m.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
- m.getDB("test").createUser({user: "bar", pwd: "baz", roles: jsTest.basicUserRoles});
- print("make sure user is written before shutting down");
- MongoRunner.stopMongod(m);
-
- print("start up rs");
- var rs = new ReplSetTest({"name": name, "nodes": 3});
-
- // The first node is started with the pre-populated data directory.
- print("start 0 with keyFile");
- m = rs.start(0, {"keyFile": key1_600, noCleanData: true});
- print("start 1 with keyFile");
- rs.start(1, {"keyFile": key1_600});
- print("start 2 with keyFile");
- rs.start(2, {"keyFile": key1_600});
-
- var result = m.getDB("admin").auth("foo", "bar");
- assert.eq(result, 1, "login failed");
- print("Initializing replSet with config: " + tojson(rs.getReplSetConfig()));
- result = m.getDB("admin").runCommand({replSetInitiate: rs.getReplSetConfig()});
- assert.eq(result.ok, 1, "couldn't initiate: " + tojson(result));
- m.getDB('admin')
- .logout(); // In case this node doesn't become primary, make sure its not auth'd
-
- var master = rs.getPrimary();
- rs.awaitSecondaryNodes();
- var mId = rs.getNodeId(master);
- var slave = rs._slaves[0];
- assert.eq(1, master.getDB("admin").auth("foo", "bar"));
- assert.writeOK(master.getDB("test").foo.insert(
- {x: 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
-
- print("try some legal and illegal reads");
- var r = master.getDB("test").foo.findOne();
- assert.eq(r.x, 1);
-
- slave.setSlaveOk();
-
- function doQueryOn(p) {
- var error = assert
- .throws(
- function() {
- r = p.getDB("test").foo.findOne();
- },
- [],
- "find did not throw, returned: " + tojson(r))
- .toString();
- printjson(error);
- assert.gt(error.indexOf("command find requires authentication"), -1, "error was non-auth");
- }
-
- doQueryOn(slave);
- master.adminCommand({logout: 1});
-
- print("unauthorized:");
- printjson(master.adminCommand({replSetGetStatus: 1}));
-
- doQueryOn(master);
-
- result = slave.getDB("test").auth("bar", "baz");
- assert.eq(result, 1);
-
- r = slave.getDB("test").foo.findOne();
- assert.eq(r.x, 1);
-
- print("add some data");
- master.getDB("test").auth("bar", "baz");
- var bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- bulk.insert({x: i, foo: "bar"});
- }
- assert.writeOK(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}));
-
- print("fail over");
- rs.stop(mId);
-
- master = rs.getPrimary();
-
- print("add some more data 1");
- master.getDB("test").auth("bar", "baz");
- bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- bulk.insert({x: i, foo: "bar"});
- }
- assert.writeOK(bulk.execute({w: 2}));
-
- print("resync");
- rs.restart(mId, {"keyFile": key1_600});
- master = rs.getPrimary();
-
- print("add some more data 2");
- bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- bulk.insert({x: i, foo: "bar"});
- }
- bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS});
-
- print("add member with wrong key");
- var conn = MongoRunner.runMongod({
- dbpath: MongoRunner.dataPath + name + "-3",
- port: port[3],
- replSet: "rs_auth1",
- oplogSize: 2,
- keyFile: key2_600
- });
-
- master.getDB("admin").auth("foo", "bar");
- var config = master.getDB("local").system.replset.findOne();
- config.members.push({_id: 3, host: rs.host + ":" + port[3]});
- config.version++;
+"use strict";
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+var name = "rs_auth1";
+var port = allocatePorts(5);
+var path = "jstests/libs/";
+
+// These keyFiles have their permissions set to 600 later in the test.
+var key1_600 = path + "key1";
+var key2_600 = path + "key2";
+
+// This keyFile has its permissions set to 644 later in the test.
+var key1_644 = path + "key1_644";
+
+print("try starting mongod with auth");
+var m =
+ MongoRunner.runMongod({auth: "", port: port[4], dbpath: MongoRunner.dataDir + "/wrong-auth"});
+
+assert.eq(m.getDB("local").auth("__system", ""), 0);
+
+MongoRunner.stopMongod(m);
+
+print("reset permissions");
+run("chmod", "644", key1_644);
+
+print("try starting mongod");
+m = runMongoProgram(
+ "mongod", "--keyFile", key1_644, "--port", port[0], "--dbpath", MongoRunner.dataPath + name);
+
+print("should fail with wrong permissions");
+assert.eq(
+    m, _isWindows() ? 100 : 1, "mongod should fail to start: keyfile permissions too open");
+
+// Pre-populate the data directory for the first replica set node, to be started later, with
+// a user's credentials.
+print("add a user to server0: foo");
+m = MongoRunner.runMongod({dbpath: MongoRunner.dataPath + name + "-0"});
+m.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
+m.getDB("test").createUser({user: "bar", pwd: "baz", roles: jsTest.basicUserRoles});
+print("make sure user is written before shutting down");
+MongoRunner.stopMongod(m);
+
+print("start up rs");
+var rs = new ReplSetTest({"name": name, "nodes": 3});
+
+// The first node is started with the pre-populated data directory.
+print("start 0 with keyFile");
+m = rs.start(0, {"keyFile": key1_600, noCleanData: true});
+print("start 1 with keyFile");
+rs.start(1, {"keyFile": key1_600});
+print("start 2 with keyFile");
+rs.start(2, {"keyFile": key1_600});
+
+var result = m.getDB("admin").auth("foo", "bar");
+assert.eq(result, 1, "login failed");
+print("Initializing replSet with config: " + tojson(rs.getReplSetConfig()));
+result = m.getDB("admin").runCommand({replSetInitiate: rs.getReplSetConfig()});
+assert.eq(result.ok, 1, "couldn't initiate: " + tojson(result));
+m.getDB('admin').logout();  // In case this node doesn't become primary, make sure it's not auth'd
+
+var master = rs.getPrimary();
+rs.awaitSecondaryNodes();
+var mId = rs.getNodeId(master);
+var slave = rs._slaves[0];
+assert.eq(1, master.getDB("admin").auth("foo", "bar"));
+assert.writeOK(master.getDB("test").foo.insert(
+ {x: 1}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+
+print("try some legal and illegal reads");
+var r = master.getDB("test").foo.findOne();
+assert.eq(r.x, 1);
+
+slave.setSlaveOk();
+
+function doQueryOn(p) {
+ var error = assert.throws(function() {
+ r = p.getDB("test").foo.findOne();
+ }, [], "find did not throw, returned: " + tojson(r)).toString();
+ printjson(error);
+ assert.gt(error.indexOf("command find requires authentication"), -1, "error was non-auth");
+}
+
+doQueryOn(slave);
+master.adminCommand({logout: 1});
+
+print("unauthorized:");
+printjson(master.adminCommand({replSetGetStatus: 1}));
+
+doQueryOn(master);
+
+result = slave.getDB("test").auth("bar", "baz");
+assert.eq(result, 1);
+
+r = slave.getDB("test").foo.findOne();
+assert.eq(r.x, 1);
+
+print("add some data");
+master.getDB("test").auth("bar", "baz");
+var bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({x: i, foo: "bar"});
+}
+assert.writeOK(bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}));
+
+print("fail over");
+rs.stop(mId);
+
+master = rs.getPrimary();
+
+print("add some more data 1");
+master.getDB("test").auth("bar", "baz");
+bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({x: i, foo: "bar"});
+}
+assert.writeOK(bulk.execute({w: 2}));
+
+print("resync");
+rs.restart(mId, {"keyFile": key1_600});
+master = rs.getPrimary();
+
+print("add some more data 2");
+bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({x: i, foo: "bar"});
+}
+bulk.execute({w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS});
+
+print("add member with wrong key");
+var conn = MongoRunner.runMongod({
+ dbpath: MongoRunner.dataPath + name + "-3",
+ port: port[3],
+ replSet: "rs_auth1",
+ oplogSize: 2,
+ keyFile: key2_600
+});
+
+master.getDB("admin").auth("foo", "bar");
+var config = master.getDB("local").system.replset.findOne();
+config.members.push({_id: 3, host: rs.host + ":" + port[3]});
+config.version++;
+try {
+ master.adminCommand({replSetReconfig: config});
+} catch (e) {
+ print("error: " + e);
+}
+master = rs.getPrimary();
+master.getDB("admin").auth("foo", "bar");
+
+print("shouldn't ever sync");
+for (var i = 0; i < 10; i++) {
+ print("iteration: " + i);
+ var results = master.adminCommand({replSetGetStatus: 1});
+ printjson(results);
+ assert(results.members[3].state != 2);
+ sleep(1000);
+}
+
+print("stop member");
+MongoRunner.stopMongod(conn);
+
+print("start back up with correct key");
+var conn = MongoRunner.runMongod({
+ dbpath: MongoRunner.dataPath + name + "-3",
+ port: port[3],
+ replSet: "rs_auth1",
+ oplogSize: 2,
+ keyFile: key1_600
+});
+
+wait(function() {
try {
- master.adminCommand({replSetReconfig: config});
- } catch (e) {
- print("error: " + e);
- }
- master = rs.getPrimary();
- master.getDB("admin").auth("foo", "bar");
-
- print("shouldn't ever sync");
- for (var i = 0; i < 10; i++) {
- print("iteration: " + i);
var results = master.adminCommand({replSetGetStatus: 1});
printjson(results);
- assert(results.members[3].state != 2);
- sleep(1000);
+ return results.members[3].state == 2;
+ } catch (e) {
+ print(e);
}
-
- print("stop member");
- MongoRunner.stopMongod(conn);
-
- print("start back up with correct key");
- var conn = MongoRunner.runMongod({
- dbpath: MongoRunner.dataPath + name + "-3",
- port: port[3],
- replSet: "rs_auth1",
- oplogSize: 2,
- keyFile: key1_600
- });
-
- wait(function() {
- try {
- var results = master.adminCommand({replSetGetStatus: 1});
- printjson(results);
- return results.members[3].state == 2;
- } catch (e) {
- print(e);
+ return false;
+});
+
+print("make sure it has the config, too");
+assert.soon(function() {
+ for (var i in rs.nodes) {
+ rs.nodes[i].setSlaveOk();
+ rs.nodes[i].getDB("admin").auth("foo", "bar");
+ config = rs.nodes[i].getDB("local").system.replset.findOne();
+ if (config.version != 2) {
+ return false;
}
- return false;
- });
-
- print("make sure it has the config, too");
- assert.soon(function() {
- for (var i in rs.nodes) {
- rs.nodes[i].setSlaveOk();
- rs.nodes[i].getDB("admin").auth("foo", "bar");
- config = rs.nodes[i].getDB("local").system.replset.findOne();
- if (config.version != 2) {
- return false;
- }
- }
- return true;
- });
- MongoRunner.stopMongod(conn);
- rs.stopSet();
+ }
+ return true;
+});
+MongoRunner.stopMongod(conn);
+rs.stopSet();
})();
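A note on the pattern in the test above: it treats "never reaches state 2 (SECONDARY)" as the observable contract for a member started with a mismatched keyFile. A minimal sketch of that check as a reusable helper follows; it assumes the shell test harness (assert, sleep), an already-authenticated connection, and a member index taken from replSetGetStatus, so treat it as an illustration rather than part of the committed test.

// Sketch: assert that a node started with the wrong keyFile never syncs.
// `memberIndex` is the suspect node's position in replSetGetStatus.members.
function assertNeverBecomesSecondary(primary, memberIndex, iterations) {
    for (var i = 0; i < iterations; i++) {
        var status = primary.adminCommand({replSetGetStatus: 1});
        // State 2 (SECONDARY) would mean the wrong-key node managed to sync.
        assert.neq(status.members[memberIndex].state, 2, "node with wrong key must not sync");
        sleep(1000);
    }
}

// e.g. assertNeverBecomesSecondary(master, 3, 10), mirroring the loop above.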
diff --git a/jstests/replsets/auth2.js b/jstests/replsets/auth2.js
index 92d1c10e23a..b2eed7b4f1a 100644
--- a/jstests/replsets/auth2.js
+++ b/jstests/replsets/auth2.js
@@ -9,75 +9,75 @@
TestData.skipGossipingClusterTime = true;
(function() {
- var testInvalidAuthStates = function(replSetTest) {
- print("check that 0 is in recovering");
- replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.RECOVERING);
+var testInvalidAuthStates = function(replSetTest) {
+ print("check that 0 is in recovering");
+ replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.RECOVERING);
- print("shut down 1, 0 still in recovering.");
- replSetTest.stop(1);
- sleep(5);
-
- replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.RECOVERING);
-
- print("shut down 2, 0 becomes a secondary.");
- replSetTest.stop(2);
-
- replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.SECONDARY);
-
- replSetTest.restart(1, {"keyFile": key1});
- replSetTest.restart(2, {"keyFile": key1});
- };
-
- var name = "rs_auth2";
- var path = "jstests/libs/";
-
- // These keyFiles have their permissions set to 600 later in the test.
- var key1 = path + "key1";
- var key2 = path + "key2";
+ print("shut down 1, 0 still in recovering.");
+ replSetTest.stop(1);
+ sleep(5);
- var replSetTest = new ReplSetTest({name: name, nodes: 3, waitForKeys: true});
- var nodes = replSetTest.startSet();
- var hostnames = replSetTest.nodeList();
- replSetTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": hostnames[0], "priority": 2},
- {"_id": 1, "host": hostnames[1], priority: 0},
- {"_id": 2, "host": hostnames[2], priority: 0}
- ]
- });
+ replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.RECOVERING);
- var master = replSetTest.getPrimary();
+ print("shut down 2, 0 becomes a secondary.");
+ replSetTest.stop(2);
- print("add an admin user");
- master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: replSetTest.kDefaultTimeoutMS});
- var m = replSetTest.nodes[0];
+ replSetTest.waitForState(replSetTest.nodes[0], ReplSetTest.State.SECONDARY);
- print("starting 1 and 2 with key file");
- replSetTest.stop(1);
replSetTest.restart(1, {"keyFile": key1});
- replSetTest.stop(2);
replSetTest.restart(2, {"keyFile": key1});
-
-    // Authenticate to the nodes that are running with auth enabled.
- replSetTest.nodes[1].getDB("admin").auth("foo", "bar");
- replSetTest.nodes[2].getDB("admin").auth("foo", "bar");
- testInvalidAuthStates(replSetTest);
-
- print("restart mongod with bad keyFile");
-
- replSetTest.stop(0);
- m = replSetTest.restart(0, {"keyFile": key2});
-
- // auth to all nodes
- replSetTest.nodes[0].getDB("admin").auth("foo", "bar");
- replSetTest.nodes[1].getDB("admin").auth("foo", "bar");
- replSetTest.nodes[2].getDB("admin").auth("foo", "bar");
- testInvalidAuthStates(replSetTest);
-
- replSetTest.stop(0);
- m = replSetTest.restart(0, {"keyFile": key1});
-
- replSetTest.stopSet();
+};
+
+var name = "rs_auth2";
+var path = "jstests/libs/";
+
+// These keyFiles have their permissions set to 600 later in the test.
+var key1 = path + "key1";
+var key2 = path + "key2";
+
+var replSetTest = new ReplSetTest({name: name, nodes: 3, waitForKeys: true});
+var nodes = replSetTest.startSet();
+var hostnames = replSetTest.nodeList();
+replSetTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": hostnames[0], "priority": 2},
+ {"_id": 1, "host": hostnames[1], priority: 0},
+ {"_id": 2, "host": hostnames[2], priority: 0}
+ ]
+});
+
+var master = replSetTest.getPrimary();
+
+print("add an admin user");
+master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: replSetTest.kDefaultTimeoutMS});
+var m = replSetTest.nodes[0];
+
+print("starting 1 and 2 with key file");
+replSetTest.stop(1);
+replSetTest.restart(1, {"keyFile": key1});
+replSetTest.stop(2);
+replSetTest.restart(2, {"keyFile": key1});
+
+// Authenticate to the nodes that are running with auth enabled.
+replSetTest.nodes[1].getDB("admin").auth("foo", "bar");
+replSetTest.nodes[2].getDB("admin").auth("foo", "bar");
+testInvalidAuthStates(replSetTest);
+
+print("restart mongod with bad keyFile");
+
+replSetTest.stop(0);
+m = replSetTest.restart(0, {"keyFile": key2});
+
+// auth to all nodes
+replSetTest.nodes[0].getDB("admin").auth("foo", "bar");
+replSetTest.nodes[1].getDB("admin").auth("foo", "bar");
+replSetTest.nodes[2].getDB("admin").auth("foo", "bar");
+testInvalidAuthStates(replSetTest);
+
+replSetTest.stop(0);
+m = replSetTest.restart(0, {"keyFile": key1});
+
+replSetTest.stopSet();
}());
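The per-node auth calls above repeat the same lines before each testInvalidAuthStates run. A helper in the same style is sketched below, assuming the admin user already exists; the user name and password are taken from the test.

// Sketch: authenticate one admin user on every node of a set.
// auth() returns 1 on success and 0 on failure, so the sum is a success count.
function authAllNodes(replSetTest, user, pwd) {
    var successes = 0;
    replSetTest.nodes.forEach(function(node) {
        successes += node.getDB("admin").auth(user, pwd);
    });
    return successes;
}

// e.g. authAllNodes(replSetTest, "foo", "bar");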
diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js
index d35d0ec2919..179edf015d6 100644
--- a/jstests/replsets/auth_no_pri.js
+++ b/jstests/replsets/auth_no_pri.js
@@ -1,32 +1,31 @@
// Test that you can still authenticate a replica set connection when the set has no primary (SERVER-6665).
(function() {
- 'use strict';
+'use strict';
- var NODE_COUNT = 3;
- var rs = new ReplSetTest({"nodes": NODE_COUNT, keyFile: "jstests/libs/key1"});
- var nodes = rs.startSet();
- rs.initiate();
+var NODE_COUNT = 3;
+var rs = new ReplSetTest({"nodes": NODE_COUNT, keyFile: "jstests/libs/key1"});
+var nodes = rs.startSet();
+rs.initiate();
- // Add user
- var master = rs.getPrimary();
- master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT});
+// Add user
+var master = rs.getPrimary();
+master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT});
- // Can authenticate replset connection when whole set is up.
- var conn = new Mongo(rs.getURL());
- assert(conn.getDB('admin').auth('admin', 'pwd'));
- assert.writeOK(conn.getDB('admin').foo.insert({a: 1}, {writeConcern: {w: NODE_COUNT}}));
+// Can authenticate replset connection when whole set is up.
+var conn = new Mongo(rs.getURL());
+assert(conn.getDB('admin').auth('admin', 'pwd'));
+assert.writeOK(conn.getDB('admin').foo.insert({a: 1}, {writeConcern: {w: NODE_COUNT}}));
- // Make sure there is no primary
- rs.stop(0);
- rs.stop(1);
- rs.waitForState(nodes[2], ReplSetTest.State.SECONDARY);
+// Make sure there is no primary
+rs.stop(0);
+rs.stop(1);
+rs.waitForState(nodes[2], ReplSetTest.State.SECONDARY);
- // Make sure you can still authenticate a replset connection with no primary
- var conn2 = new Mongo(rs.getURL());
- conn2.setSlaveOk(true);
- assert(conn2.getDB('admin').auth({user: 'admin', pwd: 'pwd', mechanism: "SCRAM-SHA-1"}));
- assert.eq(1, conn2.getDB('admin').foo.findOne().a);
-
- rs.stopSet();
+// Make sure you can still authenticate a replset connection with no primary
+var conn2 = new Mongo(rs.getURL());
+conn2.setSlaveOk(true);
+assert(conn2.getDB('admin').auth({user: 'admin', pwd: 'pwd', mechanism: "SCRAM-SHA-1"}));
+assert.eq(1, conn2.getDB('admin').foo.findOne().a);
+rs.stopSet();
}());
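The key move in this test is that a replica-set URL connection can authenticate and read even while the set has no primary, provided secondary reads are enabled before the first operation. A compact sketch of that sequence, assuming the user already exists:

// Sketch: open a replica-set connection usable for reads with no primary.
function connectForSecondaryReads(url, user, pwd) {
    var conn = new Mongo(url);
    conn.setSlaveOk(true);  // allow reads to target secondaries
    assert(conn.getDB('admin').auth({user: user, pwd: pwd, mechanism: "SCRAM-SHA-1"}),
           "auth should succeed even without a primary");
    return conn;
}

// e.g. var conn2 = connectForSecondaryReads(rs.getURL(), 'admin', 'pwd');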
diff --git a/jstests/replsets/await_replication_timeout.js b/jstests/replsets/await_replication_timeout.js
index 2fb4e6e9471..ce89a30c296 100644
--- a/jstests/replsets/await_replication_timeout.js
+++ b/jstests/replsets/await_replication_timeout.js
@@ -1,79 +1,77 @@
// Tests timeout behavior of waiting for write concern as well as its interaction with maxTimeMS.
(function() {
- "use strict";
+"use strict";
- var replTest = new ReplSetTest({nodes: 3});
- replTest.startSet();
- replTest.initiate();
- var primary = replTest.getPrimary();
- var testDB = primary.getDB('test');
- const collName = 'foo';
- var testColl = testDB.getCollection(collName);
+var replTest = new ReplSetTest({nodes: 3});
+replTest.startSet();
+replTest.initiate();
+var primary = replTest.getPrimary();
+var testDB = primary.getDB('test');
+const collName = 'foo';
+var testColl = testDB.getCollection(collName);
- // Insert a document and implicitly create the collection.
- let resetCollection = function(w) {
- assert.writeOK(testColl.insert(
- {_id: 0}, {writeConcern: {w: w, wtimeout: replTest.kDefaultTimeoutMS}}));
- assert.eq(1, testColl.find().itcount());
- };
+// Insert a document and implicitly create the collection.
+let resetCollection = function(w) {
+ assert.writeOK(
+ testColl.insert({_id: 0}, {writeConcern: {w: w, wtimeout: replTest.kDefaultTimeoutMS}}));
+ assert.eq(1, testColl.find().itcount());
+};
- resetCollection(3);
+resetCollection(3);
- // Make sure that there are only 2 nodes up so w:3 writes will always time out
- replTest.stop(2);
+// Make sure that there are only 2 nodes up so w:3 writes will always time out
+replTest.stop(2);
- // Test wtimeout
- var res = testDB.runCommand(
- {insert: collName, documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1000}});
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
- assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
+// Test wtimeout
+var res = testDB.runCommand(
+ {insert: collName, documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1000}});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
- // Test maxTimeMS timeout
- res = testDB.runCommand(
- {insert: collName, documents: [{a: 1}], writeConcern: {w: 3}, maxTimeMS: 1000});
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+// Test maxTimeMS timeout
+res = testDB.runCommand(
+ {insert: collName, documents: [{a: 1}], writeConcern: {w: 3}, maxTimeMS: 1000});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
- // Test with wtimeout < maxTimeMS
- res = testDB.runCommand({
- insert: collName,
- documents: [{a: 1}],
- writeConcern: {w: 3, wtimeout: 1000},
- maxTimeMS: 10 * 1000
- });
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
- assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
+// Test with wtimeout < maxTimeMS
+res = testDB.runCommand({
+ insert: collName,
+ documents: [{a: 1}],
+ writeConcern: {w: 3, wtimeout: 1000},
+ maxTimeMS: 10 * 1000
+});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
- // Test with wtimeout > maxTimeMS
- res = testDB.runCommand({
- insert: collName,
- documents: [{a: 1}],
- writeConcern: {w: 3, wtimeout: 10 * 1000},
- maxTimeMS: 1000
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+// Test with wtimeout > maxTimeMS
+res = testDB.runCommand({
+ insert: collName,
+ documents: [{a: 1}],
+ writeConcern: {w: 3, wtimeout: 10 * 1000},
+ maxTimeMS: 1000
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
- // dropDatabase respects the 'w' field when it is stronger than the default of majority.
- res = testDB.runCommand({dropDatabase: 1, writeConcern: {w: 3, wtimeout: 1000}});
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
- assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
+// dropDatabase respects the 'w' field when it is stronger than the default of majority.
+res = testDB.runCommand({dropDatabase: 1, writeConcern: {w: 3, wtimeout: 1000}});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+assert.eq(ErrorCodes.WriteConcernFailed, res.writeConcernError.code);
- resetCollection(2);
+resetCollection(2);
- // Pause application on secondary so that commit point doesn't advance, meaning that a dropped
- // database on the primary will remain in 'drop-pending' state.
- var secondary = replTest.getSecondary();
- jsTestLog("Pausing oplog application on the secondary node.");
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+// Pause application on secondary so that commit point doesn't advance, meaning that a dropped
+// database on the primary will remain in 'drop-pending' state.
+var secondary = replTest.getSecondary();
+jsTestLog("Pausing oplog application on the secondary node.");
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
- // dropDatabase defaults to 'majority' when a weaker 'w' field is provided, but respects
- // 'wtimeout'.
- res = testDB.runCommand({dropDatabase: 1, writeConcern: {w: 1, wtimeout: 1000}});
- assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
-
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- replTest.stopSet();
+// dropDatabase defaults to 'majority' when a weaker 'w' field is provided, but respects
+// 'wtimeout'.
+res = testDB.runCommand({dropDatabase: 1, writeConcern: {w: 1, wtimeout: 1000}});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed);
+assert.commandWorked(secondary.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+replTest.stopSet();
})();
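The cases above encode a precedence rule: when both wtimeout and maxTimeMS are set, the smaller deadline determines which error surfaces (WriteConcernFailed vs. MaxTimeMSExpired). A sketch that states the rule directly, reusing testDB and collName from the test:

// Sketch: assert which timeout wins for a w:3 insert that can never be satisfied
// (one node is stopped, as above).
function assertTimeoutPrecedence(testDB, collName, wtimeoutMS, maxTimeMS) {
    var res = testDB.runCommand({
        insert: collName,
        documents: [{a: 1}],
        writeConcern: {w: 3, wtimeout: wtimeoutMS},
        maxTimeMS: maxTimeMS
    });
    var expected =
        wtimeoutMS < maxTimeMS ? ErrorCodes.WriteConcernFailed : ErrorCodes.MaxTimeMSExpired;
    assert.commandFailedWithCode(res, expected);
}

// e.g. assertTimeoutPrecedence(testDB, collName, 1000, 10 * 1000);  // wtimeout fires first
//      assertTimeoutPrecedence(testDB, collName, 10 * 1000, 1000);  // maxTimeMS fires first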
diff --git a/jstests/replsets/awaitdata_getmore_new_last_committed_optime.js b/jstests/replsets/awaitdata_getmore_new_last_committed_optime.js
index 29680677670..0f76df2dfe5 100644
--- a/jstests/replsets/awaitdata_getmore_new_last_committed_optime.js
+++ b/jstests/replsets/awaitdata_getmore_new_last_committed_optime.js
@@ -3,108 +3,108 @@
// while an awaitData query is running. See SERVER-35239.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
- load('jstests/libs/check_log.js');
+'use strict';
+load('jstests/replsets/rslib.js');
+load('jstests/libs/check_log.js');
+
+const name = 'awaitdata_getmore_new_last_committed_optime';
+const replSet = new ReplSetTest({name: name, nodes: 5, settings: {chainingAllowed: false}});
+
+replSet.startSet();
+replSet.initiate();
+
+const dbName = 'test';
+const collName = 'coll';
+
+const primary = replSet.getPrimary();
+const secondaries = replSet.getSecondaries();
+const secondary = secondaries[0];
+
+const primaryDB = primary.getDB(dbName);
- const name = 'awaitdata_getmore_new_last_committed_optime';
- const replSet = new ReplSetTest({name: name, nodes: 5, settings: {chainingAllowed: false}});
+// Create capped collection on primary and allow it to be committed.
+assert.commandWorked(primaryDB.createCollection(collName, {capped: true, size: 2048}));
+replSet.awaitReplication();
+replSet.awaitLastOpCommitted();
- replSet.startSet();
- replSet.initiate();
+// Stop data replication on 3 secondaries to prevent writes from being committed.
+jsTestLog('Stopping replication');
+stopServerReplication(secondaries[1]);
+stopServerReplication(secondaries[2]);
+stopServerReplication(secondaries[3]);
+
+// Write data to primary.
+for (let i = 0; i < 2; i++) {
+ assert.commandWorked(primaryDB[collName].insert({_id: i}, {writeConcern: {w: 2}}));
+}
+
+replSet.awaitReplication(null, null, [secondary]);
+jsTestLog('Secondary has replicated data');
+
+jsTestLog('Starting parallel shell');
+// Start a parallel shell because we'll be enabling a failpoint that will make the thread hang.
+let waitForGetMoreToFinish = startParallelShell(() => {
+ load('jstests/replsets/rslib.js');
+
+ const secondary = db.getMongo();
+ secondary.setSlaveOk();
const dbName = 'test';
const collName = 'coll';
-
- const primary = replSet.getPrimary();
- const secondaries = replSet.getSecondaries();
- const secondary = secondaries[0];
-
- const primaryDB = primary.getDB(dbName);
-
- // Create capped collection on primary and allow it to be committed.
- assert.commandWorked(primaryDB.createCollection(collName, {capped: true, size: 2048}));
- replSet.awaitReplication();
- replSet.awaitLastOpCommitted();
-
-    // Stop data replication on 3 secondaries to prevent writes from being committed.
- jsTestLog('Stopping replication');
- stopServerReplication(secondaries[1]);
- stopServerReplication(secondaries[2]);
- stopServerReplication(secondaries[3]);
-
- // Write data to primary.
- for (let i = 0; i < 2; i++) {
- assert.commandWorked(primaryDB[collName].insert({_id: i}, {writeConcern: {w: 2}}));
- }
-
- replSet.awaitReplication(null, null, [secondary]);
- jsTestLog('Secondary has replicated data');
-
- jsTestLog('Starting parallel shell');
- // Start a parallel shell because we'll be enabling a failpoint that will make the thread hang.
- let waitForGetMoreToFinish = startParallelShell(() => {
- load('jstests/replsets/rslib.js');
-
- const secondary = db.getMongo();
- secondary.setSlaveOk();
-
- const dbName = 'test';
- const collName = 'coll';
- const awaitDataDB = db.getSiblingDB('test');
-
- // Create awaitData cursor and get all data written so that a following getMore will have to
- // wait for more data.
- let cmdRes =
- awaitDataDB.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true});
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, dbName + "." + collName);
- assert.eq(cmdRes.cursor.firstBatch.length, 2, tojson(cmdRes));
-
- // Enable failpoint.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: 'planExecutorHangBeforeShouldWaitForInserts', mode: 'alwaysOn'}));
-
-        // Call getMore on the awaitData cursor with lastKnownCommittedOpTime ahead of the node.
-        // This will hang until we've disabled the failpoint. maxTimeMS must be set; otherwise
-        // the default timeout for waiting for inserts is 1 second.
- const lastOpTime = getLastOpTime(secondary);
- cmdRes = awaitDataDB.runCommand({
- getMore: cmdRes.cursor.id,
- collection: collName,
- batchSize: NumberInt(2),
- maxTimeMS: 10000,
- lastKnownCommittedOpTime: lastOpTime
- });
-
- assert.commandWorked(cmdRes);
- assert.gt(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, dbName + "." + collName);
- assert.eq(cmdRes.cursor.nextBatch.length, 0, tojson(cmdRes));
- }, secondary.port);
-
- // Ensure that we've hit the failpoint before moving on.
- checkLog.contains(
- secondary, 'PlanExecutor - planExecutorHangBeforeShouldWaitForInserts fail point enabled');
-
- // Restart replication on the other nodes.
- jsTestLog('Restarting replication');
- restartServerReplication(secondaries[1]);
- restartServerReplication(secondaries[2]);
- restartServerReplication(secondaries[3]);
-
- // Wait until all nodes have committed the last op. At this point in executing the getMore,
- // the node's lastCommittedOpTime should now be ahead of the client's lastKnownCommittedOpTime.
- replSet.awaitLastOpCommitted();
- jsTestLog('All nodes caught up');
-
- // Disable failpoint.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'planExecutorHangBeforeShouldWaitForInserts', mode: 'off'}));
-
- waitForGetMoreToFinish();
- jsTestLog('Parallel shell successfully exited');
-
- replSet.stopSet();
+ const awaitDataDB = db.getSiblingDB('test');
+
+ // Create awaitData cursor and get all data written so that a following getMore will have to
+ // wait for more data.
+ let cmdRes =
+ awaitDataDB.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true});
+ assert.commandWorked(cmdRes);
+ assert.gt(cmdRes.cursor.id, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, dbName + "." + collName);
+ assert.eq(cmdRes.cursor.firstBatch.length, 2, tojson(cmdRes));
+
+ // Enable failpoint.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: 'planExecutorHangBeforeShouldWaitForInserts', mode: 'alwaysOn'}));
+
+    // Call getMore on the awaitData cursor with lastKnownCommittedOpTime ahead of the node.
+    // This will hang until we've disabled the failpoint. maxTimeMS must be set; otherwise
+    // the default timeout for waiting for inserts is 1 second.
+ const lastOpTime = getLastOpTime(secondary);
+ cmdRes = awaitDataDB.runCommand({
+ getMore: cmdRes.cursor.id,
+ collection: collName,
+ batchSize: NumberInt(2),
+ maxTimeMS: 10000,
+ lastKnownCommittedOpTime: lastOpTime
+ });
+
+ assert.commandWorked(cmdRes);
+ assert.gt(cmdRes.cursor.id, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, dbName + "." + collName);
+ assert.eq(cmdRes.cursor.nextBatch.length, 0, tojson(cmdRes));
+}, secondary.port);
+
+// Ensure that we've hit the failpoint before moving on.
+checkLog.contains(secondary,
+ 'PlanExecutor - planExecutorHangBeforeShouldWaitForInserts fail point enabled');
+
+// Restart replication on the other nodes.
+jsTestLog('Restarting replication');
+restartServerReplication(secondaries[1]);
+restartServerReplication(secondaries[2]);
+restartServerReplication(secondaries[3]);
+
+// Wait until all nodes have committed the last op. At this point in executing the getMore,
+// the node's lastCommittedOpTime should now be ahead of the client's lastKnownCommittedOpTime.
+replSet.awaitLastOpCommitted();
+jsTestLog('All nodes caught up');
+
+// Disable failpoint.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'planExecutorHangBeforeShouldWaitForInserts', mode: 'off'}));
+
+waitForGetMoreToFinish();
+jsTestLog('Parallel shell successfully exited');
+
+replSet.stopSet();
})();
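The parallel shell above drives the tailable awaitData cursor by hand with raw find/getMore commands, rather than through a cursor object, because it needs to inject lastKnownCommittedOpTime. A stripped-down sketch of just the find/getMore handshake, assuming a capped collection holding at least batchSize documents:

// Sketch: open a tailable awaitData cursor and issue one blocking getMore.
function tailOnce(database, collName, batchSize) {
    var res = assert.commandWorked(database.runCommand(
        {find: collName, batchSize: batchSize, awaitData: true, tailable: true}));
    assert.gt(res.cursor.id, NumberLong(0), "tailable cursor should stay open");
    // With the first batch drained, this getMore blocks up to maxTimeMS for new inserts.
    return assert.commandWorked(database.runCommand({
        getMore: res.cursor.id,
        collection: collName,
        batchSize: NumberInt(batchSize),
        maxTimeMS: 2000
    }));
}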
diff --git a/jstests/replsets/background_index.js b/jstests/replsets/background_index.js
index e92530bc985..3b302644438 100644
--- a/jstests/replsets/background_index.js
+++ b/jstests/replsets/background_index.js
@@ -1,51 +1,52 @@
-/** Tests that a background index will be successfully
+/**
+ * Tests that a background index will be successfully
* replicated to a secondary when the indexed collection
* is renamed.
*/
(function() {
- "use strict";
-
-    // Bring up a 3-node replica set.
- var name = "bg_index_rename";
- var rst = new ReplSetTest({name: name, nodes: 3});
- rst.startSet();
- rst.initiate();
-
- // Create and populate a collection.
- var primary = rst.getPrimary();
- var coll = primary.getCollection("test.foo");
- var adminDB = primary.getDB("admin");
-
- for (var i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i, x: i * 3, str: "hello world"}));
- }
-
- // Add a background index.
- coll.ensureIndex({x: 1}, {background: true});
-
- // Rename the collection.
- assert.commandWorked(
- adminDB.runCommand({renameCollection: "test.foo", to: "bar.test", dropTarget: true}),
- "Call to renameCollection failed.");
-
- // Await replication.
- rst.awaitReplication();
-
- // Step down the primary.
- assert.commandWorked(adminDB.runCommand({replSetStepDown: 60, force: true}));
-
- // Wait for new primary.
- var newPrimary = rst.getPrimary();
- assert.neq(primary, newPrimary);
- var barDB = newPrimary.getDB("bar");
- coll = newPrimary.getCollection("bar.test");
- coll.insert({_id: 200, x: 600, str: "goodnight moon"});
-
- // Check that the new primary has the index
- // on the renamed collection.
- var indexes = barDB.runCommand({listIndexes: "test"});
- assert.eq(indexes.cursor.firstBatch.length, 2);
-
- rst.stopSet();
+"use strict";
+
+// Bring up a 3-node replica set.
+var name = "bg_index_rename";
+var rst = new ReplSetTest({name: name, nodes: 3});
+rst.startSet();
+rst.initiate();
+
+// Create and populate a collection.
+var primary = rst.getPrimary();
+var coll = primary.getCollection("test.foo");
+var adminDB = primary.getDB("admin");
+
+for (var i = 0; i < 100; i++) {
+ assert.writeOK(coll.insert({_id: i, x: i * 3, str: "hello world"}));
+}
+
+// Add a background index.
+coll.ensureIndex({x: 1}, {background: true});
+
+// Rename the collection.
+assert.commandWorked(
+ adminDB.runCommand({renameCollection: "test.foo", to: "bar.test", dropTarget: true}),
+ "Call to renameCollection failed.");
+
+// Await replication.
+rst.awaitReplication();
+
+// Step down the primary.
+assert.commandWorked(adminDB.runCommand({replSetStepDown: 60, force: true}));
+
+// Wait for new primary.
+var newPrimary = rst.getPrimary();
+assert.neq(primary, newPrimary);
+var barDB = newPrimary.getDB("bar");
+coll = newPrimary.getCollection("bar.test");
+coll.insert({_id: 200, x: 600, str: "goodnight moon"});
+
+// Check that the new primary has the index
+// on the renamed collection.
+var indexes = barDB.runCommand({listIndexes: "test"});
+assert.eq(indexes.cursor.firstBatch.length, 2);
+
+rst.stopSet();
}());
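The final assertion in this test checks only the index count; checking by name makes the expectation explicit. A sketch of that check, with the index name x_1 assumed from the {x: 1} key pattern used above:

// Sketch: assert a collection has an index with the given name.
function assertHasIndex(database, collName, indexName) {
    var res = assert.commandWorked(database.runCommand({listIndexes: collName}));
    var names = res.cursor.firstBatch.map(function(spec) {
        return spec.name;
    });
    assert.neq(-1, names.indexOf(indexName), collName + " is missing index " + indexName);
}

// e.g. assertHasIndex(barDB, "test", "x_1");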
diff --git a/jstests/replsets/batch_write_command_wc.js b/jstests/replsets/batch_write_command_wc.js
index 195e649fab8..9d41e7cba13 100644
--- a/jstests/replsets/batch_write_command_wc.js
+++ b/jstests/replsets/batch_write_command_wc.js
@@ -6,151 +6,174 @@
(function() {
- // Skip this test if running with the "wiredTiger" storage engine, since it requires
- // using 'nojournal' in a replica set, which is not supported when using WT.
- if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- // WT is currently the default engine so it is used when 'storageEngine' is not set.
- jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
- return;
- }
-
- var request;
- var result;
-
- // NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
-
- jsTest.log("Starting no journal/repl set tests...");
-
-    // Start a two-node replica set with no journal
- // Allows testing immediate write concern failures and wc application failures
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet({nojournal: ""});
- rst.initiate();
- var mongod = rst.getPrimary();
- var coll = mongod.getCollection("test.batch_write_command_wc");
-
- //
- // Basic insert, default WC
- coll.remove({});
- printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(1, coll.find().itcount());
-
- //
- // Basic insert, majority WC
- coll.remove({});
- printjson(
- request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'majority'}});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(1, coll.find().itcount());
-
- //
- // Basic insert, w:2 WC
- coll.remove({});
- printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 2}});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(1, coll.find().itcount());
-
- //
- // Basic insert, immediate nojournal error
- coll.remove({});
- printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {j: true}});
- printjson(result = coll.runCommand(request));
- assert(!result.ok);
- assert.eq(0, coll.find().itcount());
-
- //
- // Basic insert, timeout wc error
- coll.remove({});
- printjson(
- request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1}});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert(result.writeConcernError);
- assert.eq(100, result.writeConcernError.code);
- assert.eq(1, coll.find().itcount());
-
- //
- // Basic insert, wmode wc error
- coll.remove({});
- printjson(
- request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'invalid'}});
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert(result.writeConcernError);
- assert.eq(1, coll.find().itcount());
-
- //
- // Two ordered inserts, write error and wc error both reported
- coll.remove({});
- printjson(request = {
- insert: coll.getName(),
- documents: [{a: 1}, {$invalid: 'doc'}],
- writeConcern: {w: 'invalid'}
- });
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(result.writeErrors.length, 1);
- assert.eq(result.writeErrors[0].index, 1);
- assert(result.writeConcernError);
- assert.eq(1, coll.find().itcount());
-
- //
- // Two unordered inserts, write error and wc error reported
- coll.remove({});
- printjson(request = {
- insert: coll.getName(),
- documents: [{a: 1}, {$invalid: 'doc'}],
- writeConcern: {w: 'invalid'},
- ordered: false
- });
- printjson(result = coll.runCommand(request));
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(result.writeErrors.length, 1);
- assert.eq(result.writeErrors[0].index, 1);
- assert(result.writeConcernError);
- assert.eq(1, coll.find().itcount());
-
- //
- // Write error with empty writeConcern object.
- coll.remove({});
- request =
- {insert: coll.getName(), documents: [{_id: 1}, {_id: 1}], writeConcern: {}, ordered: false};
- result = coll.runCommand(request);
- assert(result.ok);
- assert.eq(1, result.n);
- assert.eq(result.writeErrors.length, 1);
- assert.eq(result.writeErrors[0].index, 1);
- assert.eq(null, result.writeConcernError);
- assert.eq(1, coll.find().itcount());
-
- //
- // Write error with unspecified w.
- coll.remove({});
- request = {
- insert: coll.getName(),
- documents: [{_id: 1}, {_id: 1}],
- writeConcern: {wtimeout: 1},
- ordered: false
- };
- result = assert.commandWorkedIgnoringWriteErrors(coll.runCommand(request));
- assert.eq(1, result.n);
- assert.eq(result.writeErrors.length, 1);
- assert.eq(result.writeErrors[0].index, 1);
- assert.eq(null, result.writeConcernError);
- assert.eq(1, coll.find().itcount());
-
- jsTest.log("DONE no journal/repl tests");
- rst.stopSet();
+// Skip this test if running with the "wiredTiger" storage engine, since it requires
+// using 'nojournal' in a replica set, which is not supported when using WT.
+if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
+ // WT is currently the default engine so it is used when 'storageEngine' is not set.
+ jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
+ return;
+}
+var request;
+var result;
+
+// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
+
+jsTest.log("Starting no journal/repl set tests...");
+
+// Start a two-node replica set with no journal
+// Allows testing immediate write concern failures and wc application failures
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet({nojournal: ""});
+rst.initiate();
+var mongod = rst.getPrimary();
+var coll = mongod.getCollection("test.batch_write_command_wc");
+
+//
+// Basic insert, default WC
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}]
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic insert, majority WC
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 'majority'}
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic insert, w:2 WC
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 2}
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic insert, immediate nojournal error
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {j: true}
+});
+printjson(result = coll.runCommand(request));
+assert(!result.ok);
+assert.eq(0, coll.find().itcount());
+
+//
+// Basic insert, timeout wc error
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 3, wtimeout: 1}
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert(result.writeConcernError);
+assert.eq(100, result.writeConcernError.code);
+assert.eq(1, coll.find().itcount());
+
+//
+// Basic insert, wmode wc error
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 'invalid'}
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert(result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Two ordered inserts, write error and wc error both reported
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}, {$invalid: 'doc'}],
+ writeConcern: {w: 'invalid'}
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(result.writeErrors.length, 1);
+assert.eq(result.writeErrors[0].index, 1);
+assert(result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Two unordered inserts, write error and wc error reported
+coll.remove({});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}, {$invalid: 'doc'}],
+ writeConcern: {w: 'invalid'},
+ ordered: false
+});
+printjson(result = coll.runCommand(request));
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(result.writeErrors.length, 1);
+assert.eq(result.writeErrors[0].index, 1);
+assert(result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Write error with empty writeConcern object.
+coll.remove({});
+request = {
+ insert: coll.getName(),
+ documents: [{_id: 1}, {_id: 1}],
+ writeConcern: {},
+ ordered: false
+};
+result = coll.runCommand(request);
+assert(result.ok);
+assert.eq(1, result.n);
+assert.eq(result.writeErrors.length, 1);
+assert.eq(result.writeErrors[0].index, 1);
+assert.eq(null, result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+//
+// Write error with unspecified w.
+coll.remove({});
+request = {
+ insert: coll.getName(),
+ documents: [{_id: 1}, {_id: 1}],
+ writeConcern: {wtimeout: 1},
+ ordered: false
+};
+result = assert.commandWorkedIgnoringWriteErrors(coll.runCommand(request));
+assert.eq(1, result.n);
+assert.eq(result.writeErrors.length, 1);
+assert.eq(result.writeErrors[0].index, 1);
+assert.eq(null, result.writeConcernError);
+assert.eq(1, coll.find().itcount());
+
+jsTest.log("DONE no journal/repl tests");
+rst.stopSet();
})();
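Across these cases the response shape is the real API under test: per-document failures arrive in writeErrors, replication failures arrive in writeConcernError, and one response can carry both. A sketch that summarizes a response, with code 100 matching the WriteConcernFailed assertion in the timeout case above:

// Sketch: classify a write command response by its two error channels.
function classifyWriteResult(result) {
    return {
        ok: result.ok === 1,
        nApplied: result.n,
        writeErrorIndexes: (result.writeErrors || []).map(function(e) {
            return e.index;
        }),
        // 100 is the WriteConcernFailed code asserted in the timeout case above.
        hasWriteConcernError: !!result.writeConcernError
    };
}

// e.g. printjson(classifyWriteResult(coll.runCommand(request)));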
diff --git a/jstests/replsets/buildindexes.js b/jstests/replsets/buildindexes.js
index 303ed1f352b..1276ad73a61 100644
--- a/jstests/replsets/buildindexes.js
+++ b/jstests/replsets/buildindexes.js
@@ -1,68 +1,68 @@
// Check that the buildIndexes config option is working
(function() {
-    // Skip the db hash check because the secondary will have a different number of indexes
-    // due to buildIndexes=false on the secondary.
- TestData.skipCheckDBHashes = true;
- var name = "buildIndexes";
- var host = getHostName();
+// Skip the db hash check because the secondary will have a different number of indexes
+// due to buildIndexes=false on the secondary.
+TestData.skipCheckDBHashes = true;
+var name = "buildIndexes";
+var host = getHostName();
- var replTest = new ReplSetTest({name: name, nodes: 3});
+var replTest = new ReplSetTest({name: name, nodes: 3});
- var nodes = replTest.startSet();
+var nodes = replTest.startSet();
- var config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.members[2].buildIndexes = false;
+var config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+config.members[2].buildIndexes = false;
- replTest.initiate(config);
+replTest.initiate(config);
- var master = replTest.getPrimary().getDB(name);
- var slaveConns = replTest._slaves;
- var slave = [];
- for (var i in slaveConns) {
- slaveConns[i].setSlaveOk();
- slave.push(slaveConns[i].getDB(name));
- }
- replTest.awaitReplication();
+var master = replTest.getPrimary().getDB(name);
+var slaveConns = replTest._slaves;
+var slave = [];
+for (var i in slaveConns) {
+ slaveConns[i].setSlaveOk();
+ slave.push(slaveConns[i].getDB(name));
+}
+replTest.awaitReplication();
- master.x.ensureIndex({y: 1});
+master.x.ensureIndex({y: 1});
- for (i = 0; i < 100; i++) {
- master.x.insert({x: 1, y: "abc", c: 1});
- }
+for (i = 0; i < 100; i++) {
+ master.x.insert({x: 1, y: "abc", c: 1});
+}
- replTest.awaitReplication();
+replTest.awaitReplication();
- assert.commandWorked(slave[0].runCommand({count: "x"}));
+assert.commandWorked(slave[0].runCommand({count: "x"}));
- var indexes = slave[0].stats().indexes;
- assert.eq(indexes, 2, 'number of indexes');
+var indexes = slave[0].stats().indexes;
+assert.eq(indexes, 2, 'number of indexes');
- indexes = slave[1].stats().indexes;
- assert.eq(indexes, 1);
+indexes = slave[1].stats().indexes;
+assert.eq(indexes, 1);
- indexes = slave[0].x.stats().indexSizes;
+indexes = slave[0].x.stats().indexSizes;
- var count = 0;
- for (i in indexes) {
- count++;
- if (i == "_id_") {
- continue;
- }
- assert(i.match(/y_/));
+var count = 0;
+for (i in indexes) {
+ count++;
+ if (i == "_id_") {
+ continue;
}
+ assert(i.match(/y_/));
+}
- assert.eq(count, 2);
+assert.eq(count, 2);
- indexes = slave[1].x.stats().indexSizes;
+indexes = slave[1].x.stats().indexSizes;
- count = 0;
- for (i in indexes) {
- count++;
- }
+count = 0;
+for (i in indexes) {
+ count++;
+}
- assert.eq(count, 1);
+assert.eq(count, 1);
- replTest.stopSet();
+replTest.stopSet();
}());
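The manual for-in counting above can be condensed: the invariant is that the buildIndexes:false member reports exactly one index (_id_) while a normal secondary reports two. A sketch using the same stats() source, with Object.keys doing the counting:

// Sketch: count indexes on a collection via its indexSizes map.
function countIndexes(database, collName) {
    return Object.keys(database[collName].stats().indexSizes).length;
}

// e.g. assert.eq(2, countIndexes(slave[0], "x"));  // normal secondary: _id_ + y_1
//      assert.eq(1, countIndexes(slave[1], "x"));  // buildIndexes:false: _id_ only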
diff --git a/jstests/replsets/buildindexes_false_with_system_indexes.js b/jstests/replsets/buildindexes_false_with_system_indexes.js
index 6275e21d0ee..2c394d3e264 100644
--- a/jstests/replsets/buildindexes_false_with_system_indexes.js
+++ b/jstests/replsets/buildindexes_false_with_system_indexes.js
@@ -5,85 +5,84 @@
* @tags: [requires_persistence]
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const testName = "buildindexes_false_with_system_indexes";
+const testName = "buildindexes_false_with_system_indexes";
- let rst = new ReplSetTest({
- name: testName,
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- {rsConfig: {priority: 0, hidden: true, buildIndexes: false}},
- ],
- });
- const nodes = rst.startSet();
- rst.initiate();
+let rst = new ReplSetTest({
+ name: testName,
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ {rsConfig: {priority: 0, hidden: true, buildIndexes: false}},
+ ],
+});
+const nodes = rst.startSet();
+rst.initiate();
- let primary = rst.getPrimary();
- assert.eq(primary, nodes[0]);
- let secondary = nodes[1];
- const hidden = nodes[2];
+let primary = rst.getPrimary();
+assert.eq(primary, nodes[0]);
+let secondary = nodes[1];
+const hidden = nodes[2];
- rst.awaitReplication();
- jsTestLog("Creating a role in the admin database");
- let adminDb = primary.getDB("admin");
- adminDb.createRole(
- {role: 'test_role', roles: [{role: 'readWrite', db: 'test'}], privileges: []});
- rst.awaitReplication();
+rst.awaitReplication();
+jsTestLog("Creating a role in the admin database");
+let adminDb = primary.getDB("admin");
+adminDb.createRole({role: 'test_role', roles: [{role: 'readWrite', db: 'test'}], privileges: []});
+rst.awaitReplication();
- jsTestLog("Creating a user in the admin database");
- adminDb.createUser({user: 'test_user', pwd: 'test', roles: [{role: 'test_role', db: 'admin'}]});
- rst.awaitReplication();
+jsTestLog("Creating a user in the admin database");
+adminDb.createUser({user: 'test_user', pwd: 'test', roles: [{role: 'test_role', db: 'admin'}]});
+rst.awaitReplication();
- // Make sure the indexes we expect are present on all nodes. The buildIndexes: false node
- // should have only the _id_ index.
- let secondaryAdminDb = secondary.getDB("admin");
- const hiddenAdminDb = hidden.getDB("admin");
+// Make sure the indexes we expect are present on all nodes. The buildIndexes: false node
+// should have only the _id_ index.
+let secondaryAdminDb = secondary.getDB("admin");
+const hiddenAdminDb = hidden.getDB("admin");
- assert.eq(["_id_", "user_1_db_1"], adminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "role_1_db_1"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "user_1_db_1"],
- secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "role_1_db_1"],
- secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "user_1_db_1"], adminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "role_1_db_1"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "user_1_db_1"],
+ secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "role_1_db_1"],
+ secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- // Drop the indexes and restart the secondary. The indexes should not be re-created.
- jsTestLog("Dropping system indexes and restarting secondary.");
- adminDb.system.users.dropIndex("user_1_db_1");
- adminDb.system.roles.dropIndex("role_1_db_1");
- rst.awaitReplication();
- assert.eq(["_id_"], adminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+// Drop the indexes and restart the secondary. The indexes should not be re-created.
+jsTestLog("Dropping system indexes and restarting secondary.");
+adminDb.system.users.dropIndex("user_1_db_1");
+adminDb.system.roles.dropIndex("role_1_db_1");
+rst.awaitReplication();
+assert.eq(["_id_"], adminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- secondary = rst.restart(secondary, {}, true /* wait for node to become healthy */);
- secondaryAdminDb = secondary.getDB("admin");
- assert.eq(["_id_"], secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+secondary = rst.restart(secondary, {}, true /* wait for node to become healthy */);
+secondaryAdminDb = secondary.getDB("admin");
+assert.eq(["_id_"], secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- jsTestLog("Now restarting primary; indexes should be created.");
- rst.restart(primary);
- primary = rst.getPrimary();
- rst.awaitReplication();
- rst.waitForAllIndexBuildsToFinish("admin", "system.users");
- rst.waitForAllIndexBuildsToFinish("admin", "system.roles");
- adminDb = primary.getDB("admin");
- assert.eq(["_id_", "user_1_db_1"], adminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "role_1_db_1"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "user_1_db_1"],
- secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_", "role_1_db_1"],
- secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
- assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+jsTestLog("Now restarting primary; indexes should be created.");
+rst.restart(primary);
+primary = rst.getPrimary();
+rst.awaitReplication();
+rst.waitForAllIndexBuildsToFinish("admin", "system.users");
+rst.waitForAllIndexBuildsToFinish("admin", "system.roles");
+adminDb = primary.getDB("admin");
+assert.eq(["_id_", "user_1_db_1"], adminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "role_1_db_1"], adminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "user_1_db_1"],
+ secondaryAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_", "role_1_db_1"],
+ secondaryAdminDb.system.roles.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.users.getIndexes().map(x => x.name).sort());
+assert.eq(["_id_"], hiddenAdminDb.system.roles.getIndexes().map(x => x.name).sort());
- rst.stopSet();
+rst.stopSet();
}());
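The getIndexes().map(x => x.name).sort() expression recurs a dozen times in this test; pulling it into a helper keeps each expectation to one line. A sketch, assuming the same admin DB handles as above:

// Sketch: sorted index names for a collection, for one-line comparisons.
function indexNames(database, collName) {
    return database.getCollection(collName).getIndexes().map(x => x.name).sort();
}

// e.g. assert.eq(["_id_", "user_1_db_1"], indexNames(adminDb, "system.users"));
//      assert.eq(["_id_"], indexNames(hiddenAdminDb, "system.roles"));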
diff --git a/jstests/replsets/bulk_api_wc.js b/jstests/replsets/bulk_api_wc.js
index fd5413ddd29..591ad1aef58 100644
--- a/jstests/replsets/bulk_api_wc.js
+++ b/jstests/replsets/bulk_api_wc.js
@@ -6,146 +6,143 @@
(function() {
- jsTest.log("Starting bulk api write concern tests...");
-
- // Skip this test if running with the "wiredTiger" storage engine, since it requires
- // using 'nojournal' in a replica set, which is not supported when using WT.
- if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- // WT is currently the default engine so it is used when 'storageEngine' is not set.
- jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
- return;
- }
-
- // Start a 2-node replica set with no journal
- // Allows testing immediate write concern failures and wc application failures
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet({nojournal: ""});
- rst.initiate();
- var mongod = rst.getPrimary();
- var coll = mongod.getCollection("test.bulk_api_wc");
-
- var executeTests = function() {
-
-        // Create a unique index; legacy writes validate too early to use invalid documents
-        // for write error testing.
- coll.ensureIndex({a: 1}, {unique: true});
-
- //
- // Ordered
- //
-
- //
- // Fail due to nojournal
- coll.remove({});
- var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- assert.throws(function() {
- bulk.execute({j: true});
- });
-
- //
- // Fail due to unrecognized write concern field.
- coll.remove({});
- var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- var result = assert.throws(function() {
- bulk.execute({x: 1});
- });
- assert.eq(
- ErrorCodes.FailedToParse, result.code, 'unexpected error code: ' + tojson(result));
- assert.eq('unrecognized write concern field: x',
- result.errmsg,
- 'unexpected error message: ' + tojson(result));
-
- //
- // Fail with write error, no write concern error even though it would fail on apply for
- // ordered
- coll.remove({});
- var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.insert({a: 2});
- result = assert.throws(function() {
- bulk.execute({w: 'invalid'});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.getWriteErrors()[0].index, 2);
- assert(!result.getWriteConcernError());
- assert.eq(coll.find().itcount(), 2);
-
- //
- // Unordered
- //
-
- //
- // Fail with write error, write concern error reported when unordered
- coll.remove({});
- var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.insert({a: 2});
- var result = assert.throws(function() {
- bulk.execute({w: 'invalid'});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.getWriteErrors()[0].index, 2);
- assert(result.getWriteConcernError());
- assert.eq(coll.find().itcount(), 2);
-
- //
- // Fail with write error, write concern timeout reported when unordered
-        // Note that wtimeout:true can only be reported when the batch is all the same, so
-        // there are not multiple write concern errors.
- coll.remove({});
- var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.insert({a: 2});
- var result = assert.throws(function() {
- bulk.execute({w: 3, wtimeout: 1});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.getWriteErrors()[0].index, 2);
- assert.eq(100, result.getWriteConcernError().code);
- assert.eq(coll.find().itcount(), 2);
-
- //
- // Fail with write error and upserted, write concern error reported when unordered
- coll.remove({});
- var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a: 1});
- bulk.insert({a: 2});
- bulk.find({a: 3}).upsert().updateOne({a: 3});
- bulk.insert({a: 3});
- var result = assert.throws(function() {
- bulk.execute({w: 'invalid'});
- });
- assert.eq(result.nInserted, 2);
- assert.eq(result.nUpserted, 1);
- assert.eq(result.getUpsertedIds()[0].index, 2);
- assert.eq(result.getWriteErrors()[0].index, 3);
- assert(result.getWriteConcernError());
- assert.eq(coll.find().itcount(), 3);
- };
-
- // Use write commands
- coll.getMongo().useWriteCommands = function() {
- return true;
- };
- executeTests();
-
- // FAILING currently due to incorrect batch api reading of GLE
- // Use legacy opcodes
- coll.getMongo().useWriteCommands = function() {
- return false;
- };
- executeTests();
-
- jsTest.log("DONE bulk api wc tests");
- rst.stopSet();
-
+jsTest.log("Starting bulk api write concern tests...");
+
+// Skip this test if running with the "wiredTiger" storage engine, since it requires
+// using 'nojournal' in a replica set, which is not supported when using WT.
+if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
+ // WT is currently the default engine so it is used when 'storageEngine' is not set.
+ jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
+ return;
+}
+
+// Start a 2-node replica set with no journal
+// Allows testing immediate write concern failures and wc application failures
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet({nojournal: ""});
+rst.initiate();
+var mongod = rst.getPrimary();
+var coll = mongod.getCollection("test.bulk_api_wc");
+
+var executeTests = function() {
+    // Create a unique index; legacy writes validate too early to use invalid documents
+    // for write error testing.
+ coll.ensureIndex({a: 1}, {unique: true});
+
+ //
+ // Ordered
+ //
+
+ //
+ // Fail due to nojournal
+ coll.remove({});
+ var bulk = coll.initializeOrderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ assert.throws(function() {
+ bulk.execute({j: true});
+ });
+
+ //
+ // Fail due to unrecognized write concern field.
+ coll.remove({});
+ var bulk = coll.initializeOrderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ var result = assert.throws(function() {
+ bulk.execute({x: 1});
+ });
+ assert.eq(ErrorCodes.FailedToParse, result.code, 'unexpected error code: ' + tojson(result));
+ assert.eq('unrecognized write concern field: x',
+ result.errmsg,
+ 'unexpected error message: ' + tojson(result));
+
+ //
+    // Fail with a write error; no write concern error is reported for ordered batches even
+    // though the write concern would fail on apply
+ coll.remove({});
+ var bulk = coll.initializeOrderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.insert({a: 2});
+ result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+ });
+ assert.eq(result.nInserted, 2);
+ assert.eq(result.getWriteErrors()[0].index, 2);
+ assert(!result.getWriteConcernError());
+ assert.eq(coll.find().itcount(), 2);
+
+ //
+ // Unordered
+ //
+
+ //
+ // Fail with write error, write concern error reported when unordered
+ coll.remove({});
+ var bulk = coll.initializeUnorderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.insert({a: 2});
+ var result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+ });
+ assert.eq(result.nInserted, 2);
+ assert.eq(result.getWriteErrors()[0].index, 2);
+ assert(result.getWriteConcernError());
+ assert.eq(coll.find().itcount(), 2);
+
+ //
+ // Fail with write error, write concern timeout reported when unordered
+    // Note that wtimeout:true can only be reported when the batch is all the same, so
+    // there are not multiple wc errors
+ coll.remove({});
+ var bulk = coll.initializeUnorderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.insert({a: 2});
+ var result = assert.throws(function() {
+ bulk.execute({w: 3, wtimeout: 1});
+ });
+ assert.eq(result.nInserted, 2);
+ assert.eq(result.getWriteErrors()[0].index, 2);
+ assert.eq(100, result.getWriteConcernError().code);
+ assert.eq(coll.find().itcount(), 2);
+
+ //
+ // Fail with write error and upserted, write concern error reported when unordered
+ coll.remove({});
+ var bulk = coll.initializeUnorderedBulkOp();
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.find({a: 3}).upsert().updateOne({a: 3});
+ bulk.insert({a: 3});
+ var result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+ });
+ assert.eq(result.nInserted, 2);
+ assert.eq(result.nUpserted, 1);
+ assert.eq(result.getUpsertedIds()[0].index, 2);
+ assert.eq(result.getWriteErrors()[0].index, 3);
+ assert(result.getWriteConcernError());
+ assert.eq(coll.find().itcount(), 3);
+};
+
+// Use write commands
+coll.getMongo().useWriteCommands = function() {
+ return true;
+};
+executeTests();
+
+// FAILING currently due to incorrect batch api reading of GLE
+// Use legacy opcodes
+coll.getMongo().useWriteCommands = function() {
+ return false;
+};
+executeTests();
+
+jsTest.log("DONE bulk api wc tests");
+rst.stopSet();
})();
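
A minimal, illustrative sketch of the behavior exercised above: an unordered bulk op can
report a write error and a write concern error on the same result. This assumes a mongo
shell connected to a replica set; the collection name is hypothetical.

var coll = db.getSiblingDB("test").bulk_wc_sketch;  // hypothetical collection
coll.drop();
coll.ensureIndex({a: 1}, {unique: true});
var bulk = coll.initializeUnorderedBulkOp();
bulk.insert({a: 1});
bulk.insert({a: 1});  // duplicate key -> write error at index 1
var res = assert.throws(function() {
    bulk.execute({w: 'invalid'});  // invalid write concern -> write concern error
});
printjson(res.getWriteErrors());
printjson(res.getWriteConcernError());
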
diff --git a/jstests/replsets/capped_insert_order.js b/jstests/replsets/capped_insert_order.js
index 835c707f9fa..f1a63ea683e 100644
--- a/jstests/replsets/capped_insert_order.js
+++ b/jstests/replsets/capped_insert_order.js
@@ -2,48 +2,48 @@
// See SERVER-21483.
(function() {
- "use strict";
-
- var replTest = new ReplSetTest({name: 'capped_insert_order', nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- var slave = replTest._slaves[0];
-
- var dbName = "db";
- var masterDb = master.getDB(dbName);
- var slaveDb = slave.getDB(dbName);
-
- var collectionName = "collection";
- var masterColl = masterDb[collectionName];
- var slaveColl = slaveDb[collectionName];
-
- // Making a large capped collection to ensure that every document fits.
- masterDb.createCollection(collectionName, {capped: true, size: 1024 * 1024});
-
- // Insert 1000 docs with _id from 0 to 999 inclusive.
- const nDocuments = 1000;
- var batch = masterColl.initializeOrderedBulkOp();
- for (var i = 0; i < nDocuments; i++) {
- batch.insert({_id: i});
- }
- assert.writeOK(batch.execute());
- replTest.awaitReplication();
-
- function checkCollection(coll) {
- assert.eq(coll.find().itcount(), nDocuments);
-
- var i = 0;
- coll.find().forEach(function(doc) {
- assert.eq(doc._id, i);
- i++;
- });
- assert.eq(i, nDocuments);
- }
-
- checkCollection(masterColl);
- checkCollection(slaveColl);
-
- replTest.stopSet();
+"use strict";
+
+var replTest = new ReplSetTest({name: 'capped_insert_order', nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+var slave = replTest._slaves[0];
+
+var dbName = "db";
+var masterDb = master.getDB(dbName);
+var slaveDb = slave.getDB(dbName);
+
+var collectionName = "collection";
+var masterColl = masterDb[collectionName];
+var slaveColl = slaveDb[collectionName];
+
+// Making a large capped collection to ensure that every document fits.
+masterDb.createCollection(collectionName, {capped: true, size: 1024 * 1024});
+
+// Insert 1000 docs with _id from 0 to 999 inclusive.
+const nDocuments = 1000;
+var batch = masterColl.initializeOrderedBulkOp();
+for (var i = 0; i < nDocuments; i++) {
+ batch.insert({_id: i});
+}
+assert.writeOK(batch.execute());
+replTest.awaitReplication();
+
+function checkCollection(coll) {
+ assert.eq(coll.find().itcount(), nDocuments);
+
+ var i = 0;
+ coll.find().forEach(function(doc) {
+ assert.eq(doc._id, i);
+ i++;
+ });
+ assert.eq(i, nDocuments);
+}
+
+checkCollection(masterColl);
+checkCollection(slaveColl);
+
+replTest.stopSet();
})();
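
A minimal sketch of the invariant the test above checks, namely that capped collections
return documents in insertion order. It assumes a mongo shell database handle 'db'; the
collection name and size are illustrative.

db.createCollection("capped_sketch", {capped: true, size: 1024 * 1024});
for (var i = 0; i < 10; i++) {
    assert.writeOK(db.capped_sketch.insert({_id: i}));
}
var expected = 0;
db.capped_sketch.find().forEach(function(doc) {
    assert.eq(doc._id, expected);  // natural order matches insertion order
    expected++;
});
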
diff --git a/jstests/replsets/catchup.js b/jstests/replsets/catchup.js
index 78ec12ab888..772a8dfa3a4 100644
--- a/jstests/replsets/catchup.js
+++ b/jstests/replsets/catchup.js
@@ -1,221 +1,219 @@
// Test the catch-up behavior of new primaries.
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/write_concern_util.js");
- load("jstests/replsets/libs/election_metrics.js");
- load("jstests/replsets/rslib.js");
-
- var name = "catch_up";
- var rst = new ReplSetTest({name: name, nodes: 3, useBridge: true, waitForKeys: true});
-
- rst.startSet();
- var conf = rst.getReplSetConfig();
- conf.members[2].priority = 0;
- conf.settings = {
- heartbeatIntervalMillis: 500,
- electionTimeoutMillis: 10000,
- catchUpTimeoutMillis: 4 * 60 * 1000
- };
- rst.initiate(conf);
- rst.awaitSecondaryNodes();
-
- var primary = rst.getPrimary();
- var primaryColl = primary.getDB("test").coll;
-
- // Set verbosity for replication on all nodes.
- var verbosity = {
- "setParameter": 1,
- "logComponentVerbosity": {
- "replication": {"verbosity": 2},
- }
- };
- rst.nodes.forEach(function(node) {
- node.adminCommand(verbosity);
- });
-
- function stepUpNode(node) {
- assert.soonNoExcept(function() {
- assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
- rst.awaitNodesAgreeOnPrimary(rst.kDefaultTimeoutMS, rst.nodes, rst.getNodeId(node));
- return node.adminCommand('replSetGetStatus').myState == ReplSetTest.State.PRIMARY;
- }, 'failed to step up node ' + node.host, rst.kDefaultTimeoutMS);
-
- return node;
- }
-
- function checkOpInOplog(node, op, count) {
- node.getDB("admin").getMongo().setSlaveOk();
- var oplog = node.getDB("local")['oplog.rs'];
- var oplogArray = oplog.find().toArray();
- assert.eq(oplog.count(op), count, "op: " + tojson(op) + ", oplog: " + tojson(oplogArray));
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/replsets/libs/election_metrics.js");
+load("jstests/replsets/rslib.js");
+
+var name = "catch_up";
+var rst = new ReplSetTest({name: name, nodes: 3, useBridge: true, waitForKeys: true});
+
+rst.startSet();
+var conf = rst.getReplSetConfig();
+conf.members[2].priority = 0;
+conf.settings = {
+ heartbeatIntervalMillis: 500,
+ electionTimeoutMillis: 10000,
+ catchUpTimeoutMillis: 4 * 60 * 1000
+};
+rst.initiate(conf);
+rst.awaitSecondaryNodes();
+
+var primary = rst.getPrimary();
+var primaryColl = primary.getDB("test").coll;
+
+// Set verbosity for replication on all nodes.
+var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": {
+ "replication": {"verbosity": 2},
}
-
- // Stop replication on secondaries, do writes and step up one of the secondaries.
- //
- // The old primary has extra writes that are not replicated to the other nodes yet,
- // but the new primary steps up, getting the vote from the the third node "voter".
- function stopReplicationAndEnforceNewPrimaryToCatchUp() {
- // Write documents that cannot be replicated to secondaries in time.
- var oldSecondaries = rst.getSecondaries();
- var oldPrimary = rst.getPrimary();
- stopServerReplication(oldSecondaries);
- for (var i = 0; i < 3; i++) {
- assert.writeOK(oldPrimary.getDB("test").foo.insert({x: i}));
- }
- var latestOpOnOldPrimary = getLatestOp(oldPrimary);
-
- // New primary wins immediately, but needs to catch up.
- var newPrimary = stepUpNode(oldSecondaries[0]);
- var latestOpOnNewPrimary = getLatestOp(newPrimary);
- // Check this node is not writable.
- assert.eq(newPrimary.getDB("test").isMaster().ismaster, false);
-
- return {
- oldSecondaries: oldSecondaries,
- oldPrimary: oldPrimary,
- newPrimary: newPrimary,
- voter: oldSecondaries[1],
- latestOpOnOldPrimary: latestOpOnOldPrimary,
- latestOpOnNewPrimary: latestOpOnNewPrimary
- };
- }
-
- function reconfigElectionAndCatchUpTimeout(electionTimeout, catchupTimeout) {
- // Reconnect all nodes to make sure reconfig succeeds.
- rst.nodes.forEach(reconnect);
- // Reconfigure replica set to decrease catchup timeout.
- var newConfig = rst.getReplSetConfigFromNode();
- newConfig.version++;
- newConfig.settings.catchUpTimeoutMillis = catchupTimeout;
- newConfig.settings.electionTimeoutMillis = electionTimeout;
- reconfig(rst, newConfig);
- rst.awaitReplication();
- rst.awaitNodesAgreeOnPrimary();
+};
+rst.nodes.forEach(function(node) {
+ node.adminCommand(verbosity);
+});
+
+function stepUpNode(node) {
+ assert.soonNoExcept(function() {
+ assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
+ rst.awaitNodesAgreeOnPrimary(rst.kDefaultTimeoutMS, rst.nodes, rst.getNodeId(node));
+ return node.adminCommand('replSetGetStatus').myState == ReplSetTest.State.PRIMARY;
+ }, 'failed to step up node ' + node.host, rst.kDefaultTimeoutMS);
+
+ return node;
+}
+
+function checkOpInOplog(node, op, count) {
+ node.getDB("admin").getMongo().setSlaveOk();
+ var oplog = node.getDB("local")['oplog.rs'];
+ var oplogArray = oplog.find().toArray();
+ assert.eq(oplog.count(op), count, "op: " + tojson(op) + ", oplog: " + tojson(oplogArray));
+}
+
+// Stop replication on secondaries, do writes and step up one of the secondaries.
+//
+// The old primary has extra writes that are not replicated to the other nodes yet,
+// but the new primary steps up, getting the vote from the third node "voter".
+function stopReplicationAndEnforceNewPrimaryToCatchUp() {
+ // Write documents that cannot be replicated to secondaries in time.
+ var oldSecondaries = rst.getSecondaries();
+ var oldPrimary = rst.getPrimary();
+ stopServerReplication(oldSecondaries);
+ for (var i = 0; i < 3; i++) {
+ assert.writeOK(oldPrimary.getDB("test").foo.insert({x: i}));
}
+ var latestOpOnOldPrimary = getLatestOp(oldPrimary);
+
+ // New primary wins immediately, but needs to catch up.
+ var newPrimary = stepUpNode(oldSecondaries[0]);
+ var latestOpOnNewPrimary = getLatestOp(newPrimary);
+ // Check this node is not writable.
+ assert.eq(newPrimary.getDB("test").isMaster().ismaster, false);
+
+ return {
+ oldSecondaries: oldSecondaries,
+ oldPrimary: oldPrimary,
+ newPrimary: newPrimary,
+ voter: oldSecondaries[1],
+ latestOpOnOldPrimary: latestOpOnOldPrimary,
+ latestOpOnNewPrimary: latestOpOnNewPrimary
+ };
+}
- rst.awaitReplication();
-
- jsTest.log("Case 1: The primary is up-to-date after refreshing heartbeats.");
- // Should complete transition to primary immediately.
- var newPrimary = stepUpNode(rst.getSecondary());
- // Should win an election and finish the transition very quickly.
- assert.eq(newPrimary, rst.getPrimary());
- rst.awaitReplication();
-
- jsTest.log("Case 2: The primary needs to catch up, succeeds in time.");
- let initialNewPrimaryStatus =
- assert.commandWorked(rst.getSecondaries()[0].adminCommand({serverStatus: 1}));
-
- var stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
-
- // Disable fail point to allow replication.
- restartServerReplication(stepUpResults.oldSecondaries);
- // getPrimary() blocks until the primary finishes drain mode.
- assert.eq(stepUpResults.newPrimary, rst.getPrimary());
-
- // Check that the 'numCatchUps' field has been incremented in serverStatus.
- let newNewPrimaryStatus =
- assert.commandWorked(stepUpResults.newPrimary.adminCommand({serverStatus: 1}));
- verifyServerStatusChange(initialNewPrimaryStatus.electionMetrics,
- newNewPrimaryStatus.electionMetrics,
- 'numCatchUps',
- 1);
-
- // Wait for all secondaries to catch up
- rst.awaitReplication();
- // Check the latest op on old primary is preserved on the new one.
- checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 1);
- rst.awaitReplication();
-
- jsTest.log("Case 3: The primary needs to catch up, but has to change sync source to catch up.");
- // Reconfig the election timeout to be longer than 1 minute so that the third node will no
- // longer be blacklisted by the new primary if it happened to be at the beginning of the test.
- reconfigElectionAndCatchUpTimeout(3 * 60 * 1000, conf.settings.catchUpTimeoutMillis);
-
- stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
-
- // Disable fail point on the voter. Wait until it catches up with the old primary.
- restartServerReplication(stepUpResults.voter);
- assert.commandWorked(
- stepUpResults.voter.adminCommand({replSetSyncFrom: stepUpResults.oldPrimary.host}));
- // Wait until the new primary knows the last applied optime on the voter, so it will keep
- // catching up after the old primary is disconnected.
- assert.soon(function() {
- var replSetStatus =
- assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetGetStatus: 1}));
- var voterStatus = replSetStatus.members.filter(m => m.name == stepUpResults.voter.host)[0];
- return rs.compareOpTimes(voterStatus.optime, stepUpResults.latestOpOnOldPrimary) == 0;
- });
- // Disconnect the new primary and the old one.
- stepUpResults.oldPrimary.disconnect(stepUpResults.newPrimary);
- // Disable the failpoint, the new primary should sync from the other secondary.
- restartServerReplication(stepUpResults.newPrimary);
- assert.eq(stepUpResults.newPrimary, rst.getPrimary());
- checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 1);
- // Restore the broken connection
- stepUpResults.oldPrimary.reconnect(stepUpResults.newPrimary);
- rst.awaitReplication();
-
- jsTest.log("Case 4: The primary needs to catch up, fails due to timeout.");
- // Reconfig to make the catchup timeout shorter.
- reconfigElectionAndCatchUpTimeout(conf.settings.electionTimeoutMillis, 10 * 1000);
-
- stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
- // Wait until the new primary completes the transition to primary and writes a no-op.
- checkLog.contains(stepUpResults.newPrimary, "Catchup timed out after becoming primary");
- restartServerReplication(stepUpResults.newPrimary);
- assert.eq(stepUpResults.newPrimary, rst.getPrimary());
-
- // Wait for the no-op "new primary" after winning an election, so that we know it has
- // finished transition to primary.
- assert.soon(function() {
- return rs.compareOpTimes(stepUpResults.latestOpOnOldPrimary,
- getLatestOp(stepUpResults.newPrimary)) < 0;
- });
- // The extra oplog entries on the old primary are not replicated to the new one.
- checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
- restartServerReplication(stepUpResults.voter);
- rst.awaitReplication();
-
- jsTest.log("Case 5: The primary needs to catch up with no timeout, then gets aborted.");
- // Reconfig to make the catchup timeout infinite.
- reconfigElectionAndCatchUpTimeout(conf.settings.electionTimeoutMillis, -1);
- stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
-
- // Abort catchup.
- assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetAbortPrimaryCatchUp: 1}));
-
- // Wait for the no-op "new primary" after winning an election, so that we know it has
- // finished transition to primary.
- assert.soon(function() {
- return rs.compareOpTimes(stepUpResults.latestOpOnOldPrimary,
- getLatestOp(stepUpResults.newPrimary)) < 0;
- });
- // The extra oplog entries on the old primary are not replicated to the new one.
- checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
- restartServerReplication(stepUpResults.oldSecondaries);
- rst.awaitReplication();
- checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
-
- jsTest.log("Case 6: The primary needs to catch up with no timeout, but steps down.");
- var stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
-
- // Step-down command should abort catchup.
- assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetStepDown: 60}));
-
- // Rename the primary.
- var steppedDownPrimary = stepUpResults.newPrimary;
- var newPrimary = rst.getPrimary();
- assert.neq(newPrimary, steppedDownPrimary);
-
- // Enable data replication on the stepped down primary and make sure it syncs old writes.
+function reconfigElectionAndCatchUpTimeout(electionTimeout, catchupTimeout) {
+ // Reconnect all nodes to make sure reconfig succeeds.
rst.nodes.forEach(reconnect);
- restartServerReplication(stepUpResults.oldSecondaries);
+    // Reconfigure the replica set to adjust the election and catchup timeouts.
+ var newConfig = rst.getReplSetConfigFromNode();
+ newConfig.version++;
+ newConfig.settings.catchUpTimeoutMillis = catchupTimeout;
+ newConfig.settings.electionTimeoutMillis = electionTimeout;
+ reconfig(rst, newConfig);
rst.awaitReplication();
- checkOpInOplog(steppedDownPrimary, stepUpResults.latestOpOnOldPrimary, 1);
-
- rst.stopSet();
+ rst.awaitNodesAgreeOnPrimary();
+}
+
+rst.awaitReplication();
+
+jsTest.log("Case 1: The primary is up-to-date after refreshing heartbeats.");
+// Should complete transition to primary immediately.
+var newPrimary = stepUpNode(rst.getSecondary());
+// Should win an election and finish the transition very quickly.
+assert.eq(newPrimary, rst.getPrimary());
+rst.awaitReplication();
+
+jsTest.log("Case 2: The primary needs to catch up, succeeds in time.");
+let initialNewPrimaryStatus =
+ assert.commandWorked(rst.getSecondaries()[0].adminCommand({serverStatus: 1}));
+
+var stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
+
+// Disable fail point to allow replication.
+restartServerReplication(stepUpResults.oldSecondaries);
+// getPrimary() blocks until the primary finishes drain mode.
+assert.eq(stepUpResults.newPrimary, rst.getPrimary());
+
+// Check that the 'numCatchUps' field has been incremented in serverStatus.
+let newNewPrimaryStatus =
+ assert.commandWorked(stepUpResults.newPrimary.adminCommand({serverStatus: 1}));
+verifyServerStatusChange(
+ initialNewPrimaryStatus.electionMetrics, newNewPrimaryStatus.electionMetrics, 'numCatchUps', 1);
+
+// Wait for all secondaries to catch up
+rst.awaitReplication();
+// Check the latest op on old primary is preserved on the new one.
+checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 1);
+rst.awaitReplication();
+
+jsTest.log("Case 3: The primary needs to catch up, but has to change sync source to catch up.");
+// Reconfig the election timeout to be longer than 1 minute so that the third node is no
+// longer blacklisted by the new primary, if it happened to be blacklisted earlier in the test.
+reconfigElectionAndCatchUpTimeout(3 * 60 * 1000, conf.settings.catchUpTimeoutMillis);
+
+stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
+
+// Disable fail point on the voter. Wait until it catches up with the old primary.
+restartServerReplication(stepUpResults.voter);
+assert.commandWorked(
+ stepUpResults.voter.adminCommand({replSetSyncFrom: stepUpResults.oldPrimary.host}));
+// Wait until the new primary knows the last applied optime on the voter, so it will keep
+// catching up after the old primary is disconnected.
+assert.soon(function() {
+ var replSetStatus =
+ assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetGetStatus: 1}));
+ var voterStatus = replSetStatus.members.filter(m => m.name == stepUpResults.voter.host)[0];
+ return rs.compareOpTimes(voterStatus.optime, stepUpResults.latestOpOnOldPrimary) == 0;
+});
+// Disconnect the new primary and the old one.
+stepUpResults.oldPrimary.disconnect(stepUpResults.newPrimary);
+// Disable the failpoint, the new primary should sync from the other secondary.
+restartServerReplication(stepUpResults.newPrimary);
+assert.eq(stepUpResults.newPrimary, rst.getPrimary());
+checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 1);
+// Restore the broken connection
+stepUpResults.oldPrimary.reconnect(stepUpResults.newPrimary);
+rst.awaitReplication();
+
+jsTest.log("Case 4: The primary needs to catch up, fails due to timeout.");
+// Reconfig to make the catchup timeout shorter.
+reconfigElectionAndCatchUpTimeout(conf.settings.electionTimeoutMillis, 10 * 1000);
+
+stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
+// Wait until the new primary completes the transition to primary and writes a no-op.
+checkLog.contains(stepUpResults.newPrimary, "Catchup timed out after becoming primary");
+restartServerReplication(stepUpResults.newPrimary);
+assert.eq(stepUpResults.newPrimary, rst.getPrimary());
+
+// Wait for the no-op "new primary" after winning an election, so that we know it has
+// finished transition to primary.
+assert.soon(function() {
+ return rs.compareOpTimes(stepUpResults.latestOpOnOldPrimary,
+ getLatestOp(stepUpResults.newPrimary)) < 0;
+});
+// The extra oplog entries on the old primary are not replicated to the new one.
+checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
+restartServerReplication(stepUpResults.voter);
+rst.awaitReplication();
+
+jsTest.log("Case 5: The primary needs to catch up with no timeout, then gets aborted.");
+// Reconfig to make the catchup timeout infinite.
+reconfigElectionAndCatchUpTimeout(conf.settings.electionTimeoutMillis, -1);
+stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
+
+// Abort catchup.
+assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetAbortPrimaryCatchUp: 1}));
+
+// Wait for the no-op "new primary" after winning an election, so that we know it has
+// finished transition to primary.
+assert.soon(function() {
+ return rs.compareOpTimes(stepUpResults.latestOpOnOldPrimary,
+ getLatestOp(stepUpResults.newPrimary)) < 0;
+});
+// The extra oplog entries on the old primary are not replicated to the new one.
+checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
+restartServerReplication(stepUpResults.oldSecondaries);
+rst.awaitReplication();
+checkOpInOplog(stepUpResults.newPrimary, stepUpResults.latestOpOnOldPrimary, 0);
+
+jsTest.log("Case 6: The primary needs to catch up with no timeout, but steps down.");
+var stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp();
+
+// Step-down command should abort catchup.
+assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetStepDown: 60}));
+
+// The primary has stepped down; save it under a new name and find the new primary.
+var steppedDownPrimary = stepUpResults.newPrimary;
+var newPrimary = rst.getPrimary();
+assert.neq(newPrimary, steppedDownPrimary);
+
+// Enable data replication on the stepped down primary and make sure it syncs old writes.
+rst.nodes.forEach(reconnect);
+restartServerReplication(stepUpResults.oldSecondaries);
+rst.awaitReplication();
+checkOpInOplog(steppedDownPrimary, stepUpResults.latestOpOnOldPrimary, 1);
+
+rst.stopSet();
})();
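
A minimal sketch of the reconfig pattern the test above uses to change catch-up behavior,
assuming an initialized ReplSetTest 'rst' and the reconfig() helper from
jstests/replsets/rslib.js; the timeout value is illustrative.

load("jstests/replsets/rslib.js");
var cfg = rst.getReplSetConfigFromNode();
cfg.version++;
cfg.settings.catchUpTimeoutMillis = 10 * 1000;  // finite timeout; -1 means no timeout
reconfig(rst, cfg);
rst.awaitNodesAgreeOnPrimary();
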
diff --git a/jstests/replsets/catchup_takeover_one_high_priority.js b/jstests/replsets/catchup_takeover_one_high_priority.js
index 575407c19d2..10c6b99307f 100644
--- a/jstests/replsets/catchup_takeover_one_high_priority.js
+++ b/jstests/replsets/catchup_takeover_one_high_priority.js
@@ -14,86 +14,88 @@
// that it becomes primary.
(function() {
- 'use strict';
-
- load('jstests/replsets/rslib.js');
-
- var name = 'catchup_takeover_one_high_priority';
- var replSet = new ReplSetTest({name: name, nodes: 3, useBridge: true});
-
- var nodenames = replSet.nodeList();
- var nodes = replSet.startSet();
- replSet.initiateWithAnyNodeAsPrimary({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodenames[0]},
- {"_id": 1, "host": nodenames[1]},
- {"_id": 2, "host": nodenames[2], "priority": 2}
- ]
- });
-
- // Wait until node 2 becomes primary.
- replSet.waitForState(2, ReplSetTest.State.PRIMARY, replSet.kDefaultTimeoutMS);
- jsTestLog('node 2 is now primary');
-
- replSet.awaitReplication();
-
- // Stop replication and disconnect node 2 so that it cannot do a priority takeover.
- stopServerReplication(nodes[2]);
- nodes[2].disconnect(nodes[1]);
- nodes[2].disconnect(nodes[0]);
-
- // Ensure that node 0 becomes primary.
- assert.commandWorked(nodes[0].adminCommand({replSetStepUp: 1}));
- replSet.awaitNodesAgreeOnPrimary(replSet.kDefaultTimeoutMS, nodes.slice(0, 2));
- assert.eq(ReplSetTest.State.PRIMARY,
- assert.commandWorked(nodes[0].adminCommand('replSetGetStatus')).myState,
- nodes[0].host + " was not primary after step-up");
- jsTestLog('node 0 is now primary');
-
- // Sleep for a few seconds to ensure that node 2's optime is more than 2 seconds behind.
- // This will ensure it can't do a priority takeover until it catches up.
- sleep(3000);
-
- var primary = replSet.getPrimary();
- var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern));
-
- // Write something so that node 0 is ahead of node 1.
- stopServerReplication(nodes[1]);
- writeConcern = {writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern));
-
- nodes[2].reconnect(nodes[0]);
- nodes[2].reconnect(nodes[1]);
-
- // Step up a lagged node.
- assert.commandWorked(nodes[1].adminCommand({replSetStepUp: 1}));
- replSet.awaitNodesAgreeOnPrimary(replSet.kDefaultTimeoutMS, nodes);
- assert.eq(ReplSetTest.State.PRIMARY,
- assert.commandWorked(nodes[1].adminCommand('replSetGetStatus')).myState,
- nodes[1].host + " was not primary after step-up");
- jsTestLog('node 1 is now primary, but cannot accept writes');
-
- // Confirm that the most up-to-date node becomes primary
- // after the default catchup delay.
- replSet.waitForState(0, ReplSetTest.State.PRIMARY, 60 * 1000);
- jsTestLog('node 0 performed catchup takeover and is now primary');
-
- // Wait until the old primary steps down.
- replSet.awaitNodesAgreeOnPrimary();
-
- // Let the nodes catchup.
- restartServerReplication(nodes[1]);
- restartServerReplication(nodes[2]);
-
- // Confirm that the highest priority node becomes primary
- // after catching up.
- replSet.waitForState(2, ReplSetTest.State.PRIMARY, 30 * 1000);
- jsTestLog('node 2 performed priority takeover and is now primary');
-
- // Wait until the old primary steps down so the connections won't be closed during stopSet().
- replSet.waitForState(0, ReplSetTest.State.SECONDARY, replSet.kDefaultTimeoutMS);
-
- replSet.stopSet();
+'use strict';
+
+load('jstests/replsets/rslib.js');
+
+var name = 'catchup_takeover_one_high_priority';
+var replSet = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+
+var nodenames = replSet.nodeList();
+var nodes = replSet.startSet();
+replSet.initiateWithAnyNodeAsPrimary({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodenames[0]},
+ {"_id": 1, "host": nodenames[1]},
+ {"_id": 2, "host": nodenames[2], "priority": 2}
+ ]
+});
+
+// Wait until node 2 becomes primary.
+replSet.waitForState(2, ReplSetTest.State.PRIMARY, replSet.kDefaultTimeoutMS);
+jsTestLog('node 2 is now primary');
+
+replSet.awaitReplication();
+
+// Stop replication and disconnect node 2 so that it cannot do a priority takeover.
+stopServerReplication(nodes[2]);
+nodes[2].disconnect(nodes[1]);
+nodes[2].disconnect(nodes[0]);
+
+// Ensure that node 0 becomes primary.
+assert.commandWorked(nodes[0].adminCommand({replSetStepUp: 1}));
+replSet.awaitNodesAgreeOnPrimary(replSet.kDefaultTimeoutMS, nodes.slice(0, 2));
+assert.eq(ReplSetTest.State.PRIMARY,
+ assert.commandWorked(nodes[0].adminCommand('replSetGetStatus')).myState,
+ nodes[0].host + " was not primary after step-up");
+jsTestLog('node 0 is now primary');
+
+// Sleep for a few seconds to ensure that node 2's optime is more than 2 seconds behind.
+// This will ensure it can't do a priority takeover until it catches up.
+sleep(3000);
+
+var primary = replSet.getPrimary();
+var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}};
+assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern));
+
+// Write something so that node 0 is ahead of node 1.
+stopServerReplication(nodes[1]);
+writeConcern = {
+ writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS}
+};
+assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern));
+
+nodes[2].reconnect(nodes[0]);
+nodes[2].reconnect(nodes[1]);
+
+// Step up a lagged node.
+assert.commandWorked(nodes[1].adminCommand({replSetStepUp: 1}));
+replSet.awaitNodesAgreeOnPrimary(replSet.kDefaultTimeoutMS, nodes);
+assert.eq(ReplSetTest.State.PRIMARY,
+ assert.commandWorked(nodes[1].adminCommand('replSetGetStatus')).myState,
+ nodes[1].host + " was not primary after step-up");
+jsTestLog('node 1 is now primary, but cannot accept writes');
+
+// Confirm that the most up-to-date node becomes primary
+// after the default catchup delay.
+replSet.waitForState(0, ReplSetTest.State.PRIMARY, 60 * 1000);
+jsTestLog('node 0 performed catchup takeover and is now primary');
+
+// Wait until the old primary steps down.
+replSet.awaitNodesAgreeOnPrimary();
+
+// Let the nodes catch up.
+restartServerReplication(nodes[1]);
+restartServerReplication(nodes[2]);
+
+// Confirm that the highest priority node becomes primary
+// after catching up.
+replSet.waitForState(2, ReplSetTest.State.PRIMARY, 30 * 1000);
+jsTestLog('node 2 performed priority takeover and is now primary');
+
+// Wait until the old primary steps down so the connections won't be closed during stopSet().
+replSet.waitForState(0, ReplSetTest.State.SECONDARY, replSet.kDefaultTimeoutMS);
+
+replSet.stopSet();
})();
diff --git a/jstests/replsets/catchup_takeover_two_nodes_ahead.js b/jstests/replsets/catchup_takeover_two_nodes_ahead.js
index 286beb5b7ac..6203889af88 100644
--- a/jstests/replsets/catchup_takeover_two_nodes_ahead.js
+++ b/jstests/replsets/catchup_takeover_two_nodes_ahead.js
@@ -11,61 +11,63 @@
// Confirm that the most up-to-date node becomes primary.
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/rslib.js');
- load('jstests/replsets/libs/election_metrics.js');
+load('jstests/replsets/rslib.js');
+load('jstests/replsets/libs/election_metrics.js');
- var name = 'catchup_takeover_two_nodes_ahead';
- var replSet = new ReplSetTest({name: name, nodes: 5});
- var nodes = replSet.startSet();
- var config = replSet.getReplSetConfig();
- // Prevent nodes from syncing from other secondaries.
- config.settings = {chainingAllowed: false};
- replSet.initiate(config);
- replSet.awaitReplication();
+var name = 'catchup_takeover_two_nodes_ahead';
+var replSet = new ReplSetTest({name: name, nodes: 5});
+var nodes = replSet.startSet();
+var config = replSet.getReplSetConfig();
+// Prevent nodes from syncing from other secondaries.
+config.settings = {
+ chainingAllowed: false
+};
+replSet.initiate(config);
+replSet.awaitReplication();
- // Write something so that nodes 0 and 1 are ahead.
- stopServerReplication(nodes.slice(2, 5));
- var primary = replSet.getPrimary();
- var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern));
+// Write something so that nodes 0 and 1 are ahead.
+stopServerReplication(nodes.slice(2, 5));
+var primary = replSet.getPrimary();
+var writeConcern = {writeConcern: {w: 2, wtimeout: replSet.kDefaultTimeoutMS}};
+assert.writeOK(primary.getDB(name).bar.insert({x: 100}, writeConcern));
- const initialPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+const initialPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
- // Write something so that node 0 is ahead of node 1.
- stopServerReplication(nodes[1]);
- writeConcern = {writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern));
+// Write something so that node 0 is ahead of node 1.
+stopServerReplication(nodes[1]);
+writeConcern = {
+ writeConcern: {w: 1, wtimeout: replSet.kDefaultTimeoutMS}
+};
+assert.writeOK(primary.getDB(name).bar.insert({y: 100}, writeConcern));
- // Step up one of the lagged nodes.
- assert.commandWorked(nodes[2].adminCommand({replSetStepUp: 1}));
- replSet.awaitNodesAgreeOnPrimary();
- assert.eq(ReplSetTest.State.PRIMARY,
- assert.commandWorked(nodes[2].adminCommand('replSetGetStatus')).myState,
- nodes[2].host + " was not primary after step-up");
- jsTestLog('node 2 is now primary, but cannot accept writes');
+// Step up one of the lagged nodes.
+assert.commandWorked(nodes[2].adminCommand({replSetStepUp: 1}));
+replSet.awaitNodesAgreeOnPrimary();
+assert.eq(ReplSetTest.State.PRIMARY,
+ assert.commandWorked(nodes[2].adminCommand('replSetGetStatus')).myState,
+ nodes[2].host + " was not primary after step-up");
+jsTestLog('node 2 is now primary, but cannot accept writes');
- // Make sure that node 2 cannot write anything. Because it is lagged and replication
- // has been stopped, it shouldn't be able to become master.
- assert.commandFailedWithCode(nodes[2].getDB(name).bar.insert({z: 100}, writeConcern),
- ErrorCodes.NotMaster);
+// Make sure that node 2 cannot write anything. Because it is lagged and replication
+// has been stopped, it cannot finish catching up and accept writes.
+assert.commandFailedWithCode(nodes[2].getDB(name).bar.insert({z: 100}, writeConcern),
+ ErrorCodes.NotMaster);
- // Confirm that the most up-to-date node becomes primary
- // after the default catchup delay.
- replSet.waitForState(0, ReplSetTest.State.PRIMARY, 60 * 1000);
+// Confirm that the most up-to-date node becomes primary
+// after the default catchup delay.
+replSet.waitForState(0, ReplSetTest.State.PRIMARY, 60 * 1000);
- // Check that both the 'called' and 'successful' fields of the 'catchUpTakeover' election reason
- // counter have been incremented in serverStatus.
- const newPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
- verifyServerStatusElectionReasonCounterChange(initialPrimaryStatus.electionMetrics,
- newPrimaryStatus.electionMetrics,
- "catchUpTakeover",
- 1);
+// Check that both the 'called' and 'successful' fields of the 'catchUpTakeover' election reason
+// counter have been incremented in serverStatus.
+const newPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+verifyServerStatusElectionReasonCounterChange(
+ initialPrimaryStatus.electionMetrics, newPrimaryStatus.electionMetrics, "catchUpTakeover", 1);
- // Wait until the old primary steps down so the connections won't be closed.
- replSet.waitForState(2, ReplSetTest.State.SECONDARY, replSet.kDefaultTimeoutMS);
- // Let the nodes catchup.
- restartServerReplication(nodes.slice(1, 5));
- replSet.stopSet();
+// Wait until the old primary steps down so the connections won't be closed.
+replSet.waitForState(2, ReplSetTest.State.SECONDARY, replSet.kDefaultTimeoutMS);
+// Let the nodes catchup.
+restartServerReplication(nodes.slice(1, 5));
+replSet.stopSet();
})();
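
A minimal sketch of how the election-metrics helper used above verifies that a takeover was
recorded, assuming election_metrics.js is loaded and 'primary' is a live connection; the
takeover itself is elided.

load('jstests/replsets/libs/election_metrics.js');
const before = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
// ... induce a catchup takeover here ...
const after = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
verifyServerStatusElectionReasonCounterChange(
    before.electionMetrics, after.electionMetrics, "catchUpTakeover", 1);
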
diff --git a/jstests/replsets/chaining_removal.js b/jstests/replsets/chaining_removal.js
index 111a12887df..1569cad2f71 100644
--- a/jstests/replsets/chaining_removal.js
+++ b/jstests/replsets/chaining_removal.js
@@ -1,73 +1,73 @@
// ensure removing a chained node does not break reporting of replication progress (SERVER-15849)
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
+"use strict";
+load("jstests/replsets/rslib.js");
- var numNodes = 5;
- var host = getHostName();
- var name = "chaining_removal";
+var numNodes = 5;
+var host = getHostName();
+var name = "chaining_removal";
- var replTest = new ReplSetTest({name: name, nodes: numNodes});
- var nodes = replTest.startSet();
- var port = replTest.ports;
- replTest.initiate({
- _id: name,
- members: [
- {_id: 0, host: nodes[0].host, priority: 3},
- {_id: 1, host: nodes[1].host, priority: 0},
- {_id: 2, host: nodes[2].host, priority: 0},
- {_id: 3, host: nodes[3].host, priority: 0},
- {_id: 4, host: nodes[4].host, priority: 0},
- ],
- });
- replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 0);
- var primary = replTest.getPrimary();
- replTest.awaitReplication();
+var replTest = new ReplSetTest({name: name, nodes: numNodes});
+var nodes = replTest.startSet();
+var port = replTest.ports;
+replTest.initiate({
+ _id: name,
+ members: [
+ {_id: 0, host: nodes[0].host, priority: 3},
+ {_id: 1, host: nodes[1].host, priority: 0},
+ {_id: 2, host: nodes[2].host, priority: 0},
+ {_id: 3, host: nodes[3].host, priority: 0},
+ {_id: 4, host: nodes[4].host, priority: 0},
+ ],
+});
+replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 0);
+var primary = replTest.getPrimary();
+replTest.awaitReplication();
- // When setting up chaining on slow machines, we do not want slow writes or delayed heartbeats
- // to cause our nodes to invalidate the sync source provided in the 'replSetSyncFrom' command.
- // To achieve this, we disable the server parameter 'maxSyncSourceLagSecs' (see
- // repl_settings_init.cpp and TopologyCoordinatorImpl::Options) in
- // TopologyCoordinatorImpl::shouldChangeSyncSource().
- assert.commandWorked(nodes[1].getDB('admin').runCommand(
- {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'alwaysOn'}));
- assert.commandWorked(nodes[4].getDB('admin').runCommand(
- {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'alwaysOn'}));
+// When setting up chaining on slow machines, we do not want slow writes or delayed heartbeats
+// to cause our nodes to invalidate the sync source provided in the 'replSetSyncFrom' command.
+// To achieve this, we disable the server parameter 'maxSyncSourceLagSecs' (see
+// repl_settings_init.cpp and TopologyCoordinatorImpl::Options) in
+// TopologyCoordinatorImpl::shouldChangeSyncSource().
+assert.commandWorked(nodes[1].getDB('admin').runCommand(
+ {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'alwaysOn'}));
+assert.commandWorked(nodes[4].getDB('admin').runCommand(
+ {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'alwaysOn'}));
- // Force node 1 to sync directly from node 0.
- syncFrom(nodes[1], nodes[0], replTest);
- // Force node 4 to sync through node 1.
- syncFrom(nodes[4], nodes[1], replTest);
+// Force node 1 to sync directly from node 0.
+syncFrom(nodes[1], nodes[0], replTest);
+// Force node 4 to sync through node 1.
+syncFrom(nodes[4], nodes[1], replTest);
- // write that should reach all nodes
- var timeout = ReplSetTest.kDefaultTimeoutMS;
- var options = {writeConcern: {w: numNodes, wtimeout: timeout}};
- assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
+// write that should reach all nodes
+var timeout = ReplSetTest.kDefaultTimeoutMS;
+var options = {writeConcern: {w: numNodes, wtimeout: timeout}};
+assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
- // Re-enable 'maxSyncSourceLagSecs' checking on sync source.
- assert.commandWorked(nodes[1].getDB('admin').runCommand(
- {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'off'}));
- assert.commandWorked(nodes[4].getDB('admin').runCommand(
- {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'off'}));
+// Re-enable 'maxSyncSourceLagSecs' checking on sync source.
+assert.commandWorked(nodes[1].getDB('admin').runCommand(
+ {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'off'}));
+assert.commandWorked(nodes[4].getDB('admin').runCommand(
+ {configureFailPoint: 'disableMaxSyncSourceLagSecs', mode: 'off'}));
- var config = primary.getDB("local").system.replset.findOne();
- config.members.pop();
- config.version++;
- // remove node 4
- replTest.stop(4);
- try {
- primary.adminCommand({replSetReconfig: config});
- } catch (e) {
- print("error: " + e);
- }
+var config = primary.getDB("local").system.replset.findOne();
+config.members.pop();
+config.version++;
+// remove node 4
+replTest.stop(4);
+try {
+ primary.adminCommand({replSetReconfig: config});
+} catch (e) {
+ print("error: " + e);
+}
- // ensure writing to all four nodes still works
- primary = replTest.getPrimary();
- const liveSlaves = [nodes[1], nodes[2], nodes[3]];
- replTest.awaitReplication(null, null, liveSlaves);
- options.writeConcern.w = 4;
- assert.writeOK(primary.getDB(name).foo.insert({x: 2}, options));
+// ensure writing to all four nodes still works
+primary = replTest.getPrimary();
+const liveSlaves = [nodes[1], nodes[2], nodes[3]];
+replTest.awaitReplication(null, null, liveSlaves);
+options.writeConcern.w = 4;
+assert.writeOK(primary.getDB(name).foo.insert({x: 2}, options));
- replTest.stopSet();
+replTest.stopSet();
}());
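
A minimal sketch of pinning sync sources to build the chain used above, assuming the
syncFrom() helper from jstests/replsets/rslib.js and a running ReplSetTest 'replTest' with
its 'nodes' array.

load("jstests/replsets/rslib.js");
// Chain node 4 through node 1, which in turn syncs from node 0 (the primary).
syncFrom(nodes[1], nodes[0], replTest);
syncFrom(nodes[4], nodes[1], replTest);
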
diff --git a/jstests/replsets/change_stream_speculative_majority.js b/jstests/replsets/change_stream_speculative_majority.js
index f8833c7963f..fb37968184e 100644
--- a/jstests/replsets/change_stream_speculative_majority.js
+++ b/jstests/replsets/change_stream_speculative_majority.js
@@ -4,83 +4,81 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-
- const name = "change_stream_speculative_majority";
- const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
-
- const dbName = name;
- const collName = "coll";
-
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
- let primaryDB = primary.getDB(dbName);
- let primaryColl = primaryDB[collName];
-
- // Open a change stream.
- let res = primaryDB.runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}, maxTimeMS: 5000});
- assert.commandWorked(res);
- let cursorId = res.cursor.id;
-
- // Insert a document on primary and let it majority commit.
- assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
- // Receive the first change event.
- res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
- let changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
-
- // Save the resume token.
- let resumeToken = changes[0]["_id"];
-
- // This query should time out waiting for new results and return an empty batch.
- res = primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, maxTimeMS: 5000});
- assert.eq(res.cursor.nextBatch, []);
-
- // Pause replication on the secondary so that writes won't majority commit.
- stopServerReplication(secondary);
-
- // Do a new write on primary.
- assert.commandWorked(primaryColl.insert({_id: 2}));
-
- // The change stream query should time out waiting for the new result to majority commit.
- res = primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, maxTimeMS: 5000});
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- // An aggregate trying to resume a stream that includes the change should also time out.
- res = primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {},
- maxTimeMS: 5000
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- // Resume the stream after restarting replication. We should now be able to see the new event.
- restartServerReplication(secondary);
- replTest.awaitReplication();
-
- // Re-open the stream, and receive the new event.
- res = primaryDB.runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}});
- assert.commandWorked(res);
- changes = res.cursor.firstBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 2});
- assert.eq(changes[0]["operationType"], "insert");
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+
+const name = "change_stream_speculative_majority";
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
+
+const dbName = name;
+const collName = "coll";
+
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+let primaryDB = primary.getDB(dbName);
+let primaryColl = primaryDB[collName];
+
+// Open a change stream.
+let res = primaryDB.runCommand(
+ {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}, maxTimeMS: 5000});
+assert.commandWorked(res);
+let cursorId = res.cursor.id;
+
+// Insert a document on primary and let it majority commit.
+assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+
+// Receive the first change event.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
+let changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+
+// Save the resume token.
+let resumeToken = changes[0]["_id"];
+
+// This query should time out waiting for new results and return an empty batch.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
+assert.eq(res.cursor.nextBatch, []);
+
+// Pause replication on the secondary so that writes won't majority commit.
+stopServerReplication(secondary);
+
+// Do a new write on primary.
+assert.commandWorked(primaryColl.insert({_id: 2}));
+
+// The change stream query should time out waiting for the new result to majority commit.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+// An aggregate trying to resume a stream that includes the change should also time out.
+res = primaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {},
+ maxTimeMS: 5000
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+// Resume the stream after restarting replication. We should now be able to see the new event.
+restartServerReplication(secondary);
+replTest.awaitReplication();
+
+// Re-open the stream, and receive the new event.
+res = primaryDB.runCommand(
+ {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}});
+assert.commandWorked(res);
+changes = res.cursor.firstBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 2});
+assert.eq(changes[0]["operationType"], "insert");
+
+replTest.stopSet();
})();
\ No newline at end of file
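
A minimal sketch of the resume pattern used above: reopening a change stream from a saved
resume token. It assumes a database handle 'db' and a 'resumeToken' captured from a prior
event; the collection name is illustrative.

let res = assert.commandWorked(db.runCommand({
    aggregate: "coll",
    pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
    cursor: {}
}));
// Events after the token are replayed once they have majority-committed.
let events = res.cursor.firstBatch;
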
diff --git a/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js b/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js
index b9a55d1c3fa..8b1e9682403 100644
--- a/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js
+++ b/jstests/replsets/change_stream_speculative_majority_conflicting_catalog_changes.js
@@ -7,47 +7,47 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
+"use strict";
- const replTest = new ReplSetTest({
- name: "replset",
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({
+ name: "replset",
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
- let primaryDB = primary.getDB(dbName);
- let primaryColl = primaryDB[collName];
- let secondaryDB = secondary.getDB(dbName);
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+let primaryDB = primary.getDB(dbName);
+let primaryColl = primaryDB[collName];
+let secondaryDB = secondary.getDB(dbName);
- // Insert some documents on the primary that we can index.
- var bulk = primaryColl.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- let doc = {};
- bulk.insert({a: i, b: i, c: i, d: i, e: i});
- }
- assert.commandWorked(bulk.execute());
+// Insert some documents on the primary that we can index.
+var bulk = primaryColl.initializeUnorderedBulkOp();
+for (var i = 0; i < 1000; i++) {
+ let doc = {};
+ bulk.insert({a: i, b: i, c: i, d: i, e: i});
+}
+assert.commandWorked(bulk.execute());
- // Start several index builds on the primary. This should make it likely that index builds are
- // in progress on the secondary while doing reads below.
- primaryColl.createIndex({a: 1});
- primaryColl.createIndex({b: 1});
- primaryColl.createIndex({c: 1});
- primaryColl.createIndex({d: 1});
- primaryColl.createIndex({e: 1});
+// Start several index builds on the primary. This should make it likely that index builds are
+// in progress on the secondary while doing reads below.
+primaryColl.createIndex({a: 1});
+primaryColl.createIndex({b: 1});
+primaryColl.createIndex({c: 1});
+primaryColl.createIndex({d: 1});
+primaryColl.createIndex({e: 1});
- // Do a bunch of change stream reads against the secondary. We are not worried about the
- // responses, since we are only verifying that the server doesn't crash.
- for (var i = 0; i < 20; i++) {
- assert.commandWorked(secondaryDB.runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
- }
+// Do a bunch of change stream reads against the secondary. We are not worried about the
+// responses, since we are only verifying that the server doesn't crash.
+for (var i = 0; i < 20; i++) {
+ assert.commandWorked(
+ secondaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
+}
- replTest.stopSet();
+replTest.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js b/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js
index 2b3c503e4bd..88e3bfa47a7 100644
--- a/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js
+++ b/jstests/replsets/change_stream_speculative_majority_lastApplied_lag.js
@@ -6,102 +6,101 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
- load("jstests/libs/check_log.js"); // For checkLog.
- load("jstests/libs/parallelTester.js"); // for ScopedThread.
+load('jstests/libs/change_stream_util.js'); // For ChangeStreamTest.
+load("jstests/libs/check_log.js"); // For checkLog.
+load("jstests/libs/parallelTester.js"); // for ScopedThread.
- const name = "change_stream_speculative_majority_lastApplied_lag";
- const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
+const name = "change_stream_speculative_majority_lastApplied_lag";
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
- const dbName = name;
- const collName = "coll";
+const dbName = name;
+const collName = "coll";
- const primary = replTest.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const primaryColl = primaryDB[collName];
+const primary = replTest.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB[collName];
- // Do a few operations on the primary and let them both majority commit. Later on we will
- // receive both of these operations in a change stream.
- let res = assert.commandWorked(primaryColl.runCommand(
- "insert", {documents: [{_id: 1, v: 0}], writeConcern: {w: "majority"}}));
- assert.commandWorked(
- primaryColl.update({_id: 1}, {$set: {v: 1}}, {writeConcern: {w: "majority"}}));
+// Do a few operations on the primary and let them both majority commit. Later on we will
+// receive both of these operations in a change stream.
+let res = assert.commandWorked(
+ primaryColl.runCommand("insert", {documents: [{_id: 1, v: 0}], writeConcern: {w: "majority"}}));
+assert.commandWorked(primaryColl.update({_id: 1}, {$set: {v: 1}}, {writeConcern: {w: "majority"}}));
- // Save this operation time so we can start a change stream from here.
- let startOperTime = res.operationTime;
+// Save this operation time so we can start a change stream from here.
+let startOperTime = res.operationTime;
- // Make the primary hang after it has completed a write but before it has advanced lastApplied
- // for that write.
- primaryDB.adminCommand(
- {configureFailPoint: "hangBeforeLogOpAdvancesLastApplied", mode: "alwaysOn"});
+// Make the primary hang after it has completed a write but before it has advanced lastApplied
+// for that write.
+primaryDB.adminCommand(
+ {configureFailPoint: "hangBeforeLogOpAdvancesLastApplied", mode: "alwaysOn"});
- // Function which will be used by the background thread to perform an update on the specified
- // host, database, and collection.
- function doUpdate(host, dbName, collName, query, update) {
- let hostDB = (new Mongo(host)).getDB(dbName);
- assert.commandWorked(hostDB[collName].update(query, update));
- }
+// Function which will be used by the background thread to perform an update on the specified
+// host, database, and collection.
+function doUpdate(host, dbName, collName, query, update) {
+ let hostDB = (new Mongo(host)).getDB(dbName);
+ assert.commandWorked(hostDB[collName].update(query, update));
+}
- // Do a document update on primary, but don't wait for it to majority commit. The write should
- // hang due to the enabled failpoint.
- jsTestLog("Starting update on primary.");
- var primaryWrite =
- new ScopedThread(doUpdate, primary.host, dbName, collName, {_id: 1}, {$set: {v: 2}});
- primaryWrite.start();
+// Do a document update on primary, but don't wait for it to majority commit. The write should
+// hang due to the enabled failpoint.
+jsTestLog("Starting update on primary.");
+var primaryWrite =
+ new ScopedThread(doUpdate, primary.host, dbName, collName, {_id: 1}, {$set: {v: 2}});
+primaryWrite.start();
- // Wait for the fail point to be hit. By the time the primary hits this fail point, the update
- // should be visible. 'lastApplied', however, has not yet been advanced yet. We check both the
- // document state and the logs to make sure we hit the failpoint for the correct operation.
- assert.soon(() => (primaryColl.findOne({_id: 1}).v === 2));
- checkLog.contains(primary, 'hangBeforeLogOpAdvancesLastApplied fail point enabled.');
+// Wait for the fail point to be hit. By the time the primary hits this fail point, the update
+// should be visible. 'lastApplied', however, has not been advanced yet. We check both the
+// document state and the logs to make sure we hit the failpoint for the correct operation.
+assert.soon(() => (primaryColl.findOne({_id: 1}).v === 2));
+checkLog.contains(primary, 'hangBeforeLogOpAdvancesLastApplied fail point enabled.');
- // Open a change stream on the primary. The stream should only return the initial insert and the
- // first of the two update events, since the second update is not yet majority-committed.
- // Despite the fact that the effects of the latter update are already visible to local readers,
- // speculative majority will read at min(lastApplied, allCommitted), and so change stream's
- // 'fullDocument' lookup should also *not* return the second update's uncommitted changes.
- jsTestLog("Opening a change stream on the primary.");
- const cst = new ChangeStreamTest(primaryDB);
- let cursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {startAtOperationTime: startOperTime, fullDocument: "updateLookup"}}],
- collection: collName
- });
+// Open a change stream on the primary. The stream should only return the initial insert and the
+// first of the two update events, since the second update is not yet majority-committed.
+// Despite the fact that the effects of the latter update are already visible to local readers,
+// speculative majority will read at min(lastApplied, allCommitted), and so the change stream's
+// 'fullDocument' lookup should also *not* return the second update's uncommitted changes.
+jsTestLog("Opening a change stream on the primary.");
+const cst = new ChangeStreamTest(primaryDB);
+let cursor = cst.startWatchingChanges({
+ pipeline:
+ [{$changeStream: {startAtOperationTime: startOperTime, fullDocument: "updateLookup"}}],
+ collection: collName
+});
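+
+// Illustrative, side-effect-free check of the state this test has set up: while
+// the failpoint holds 'lastApplied' back, 'replSetGetStatus' reports both the
+// applied and last-committed optimes, which is one way to observe the gap that
+// speculative majority reads respect.
+let statusRes = assert.commandWorked(primaryDB.adminCommand({replSetGetStatus: 1}));
+jsTestLog("appliedOpTime: " + tojson(statusRes.optimes.appliedOpTime) +
+          ", lastCommittedOpTime: " + tojson(statusRes.optimes.lastCommittedOpTime));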
- cst.assertNextChangesEqual({
- cursor: cursor,
- expectedChanges: [
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, v: 0},
- ns: {db: dbName, coll: collName},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, v: 1},
- ns: {db: dbName, coll: collName},
- updateDescription: {removedFields: [], updatedFields: {v: 1}},
- operationType: "update",
- }
- ]
- });
+cst.assertNextChangesEqual({
+ cursor: cursor,
+ expectedChanges: [
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1, v: 0},
+ ns: {db: dbName, coll: collName},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1, v: 1},
+ ns: {db: dbName, coll: collName},
+ updateDescription: {removedFields: [], updatedFields: {v: 1}},
+ operationType: "update",
+ }
+ ]
+});
- // Make sure the cursor does not return any more change events.
- cursor = cst.getNextBatch(cursor);
- assert.eq(cursor.nextBatch.length, 0);
+// Make sure the cursor does not return any more change events.
+cursor = cst.getNextBatch(cursor);
+assert.eq(cursor.nextBatch.length, 0);
- // Disable the failpoint to let the test complete.
- primaryDB.adminCommand({configureFailPoint: "hangBeforeLogOpAdvancesLastApplied", mode: "off"});
+// Disable the failpoint to let the test complete.
+primaryDB.adminCommand({configureFailPoint: "hangBeforeLogOpAdvancesLastApplied", mode: "off"});
- primaryWrite.join();
- replTest.stopSet();
+primaryWrite.join();
+replTest.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js b/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js
index 2016cf0c6ea..c70054f756e 100644
--- a/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js
+++ b/jstests/replsets/change_stream_speculative_majority_latest_oplog_timestamp.js
@@ -12,80 +12,78 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-
- const name = "change_stream_speculative_majority_latest_oplog_timestamp";
- const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
-
- const dbName = name;
- const collName = "coll";
- const otherCollName = "coll_other";
-
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
-
- const primaryDB = primary.getDB(dbName);
- const primaryColl = primaryDB[collName];
-
- assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
- let res = primaryDB.runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}, maxTimeMS: 5000});
-
- assert.commandWorked(res);
- let cursorId = res.cursor.id;
-
- // Insert a document on primary and let it majority commit.
- assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
-
- // Pause replication on the secondary so that further writes won't majority commit.
- jsTestLog("Stopping replication to secondary.");
- stopServerReplication(secondary);
-
- // Receive the first change event.
- res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
- let changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
-
- // Extract the postBatchResumeToken from the first batch.
- const initialPostBatchResumeToken = res.cursor.postBatchResumeToken;
- assert.neq(initialPostBatchResumeToken, undefined);
-
- // Do a write on a collection that we are not watching changes for.
- let otherWriteRes = primaryDB.runCommand({insert: otherCollName, documents: [{_id: 1}]});
- let otherWriteOpTime = otherWriteRes.operationTime;
-
- // Replication to the secondary is paused, so the write to 'otherCollName' cannot majority
-    // commit. A change stream getMore is expected to return the "latest oplog timestamp" that it
-    // scanned, and this timestamp must be majority committed. So, this getMore should time out
- // waiting for the previous write to majority commit, even though it's on a collection that is
- // not being watched.
- res = primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, maxTimeMS: 5000});
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Restarting replication to secondary.");
- restartServerReplication(secondary);
- replTest.awaitReplication();
-
- // Now that writes can replicate again, the previous operation should have majority committed,
- // making it safe to advance the postBatchResumeToken. Note that no further events are returned,
- // indicating that the new PBRT is a high water mark generated at the latest oplog timestamp.
- res = primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, maxTimeMS: 5000});
- assert.commandWorked(res);
- assert.eq(res.cursor.nextBatch, []);
- assert.gt(bsonWoCompare(res.cursor.postBatchResumeToken, initialPostBatchResumeToken), 0);
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+
+const name = "change_stream_speculative_majority_latest_oplog_timestamp";
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
+
+const dbName = name;
+const collName = "coll";
+const otherCollName = "coll_other";
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB[collName];
+
+assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+
+let res = primaryDB.runCommand(
+ {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}, maxTimeMS: 5000});
+
+assert.commandWorked(res);
+let cursorId = res.cursor.id;
+
+// Insert a document on primary and let it majority commit.
+assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+
+// Pause replication on the secondary so that further writes won't majority commit.
+jsTestLog("Stopping replication to secondary.");
+stopServerReplication(secondary);
+
+// Receive the first change event.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
+let changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+
+// Extract the postBatchResumeToken from the first batch.
+const initialPostBatchResumeToken = res.cursor.postBatchResumeToken;
+assert.neq(initialPostBatchResumeToken, undefined);
+
+// Do a write on a collection that we are not watching changes for.
+let otherWriteRes = primaryDB.runCommand({insert: otherCollName, documents: [{_id: 1}]});
+let otherWriteOpTime = otherWriteRes.operationTime;
+
+// Replication to the secondary is paused, so the write to 'otherCollName' cannot majority
+// commit. A change stream getMore is expected to return the "latest oplog timestamp" that it
+// scanned, and this timestamp must be majority committed. So, this getMore should time out
+// waiting for the previous write to majority commit, even though it's on a collection that is
+// not being watched.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+jsTestLog("Restarting replication to secondary.");
+restartServerReplication(secondary);
+replTest.awaitReplication();
+
+// Now that writes can replicate again, the previous operation should have majority committed,
+// making it safe to advance the postBatchResumeToken. Note that no further events are returned,
+// indicating that the new PBRT is a high water mark generated at the latest oplog timestamp.
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName, maxTimeMS: 5000});
+assert.commandWorked(res);
+assert.eq(res.cursor.nextBatch, []);
+assert.gt(bsonWoCompare(res.cursor.postBatchResumeToken, initialPostBatchResumeToken), 0);
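+
+// Illustrative sketch: a high-water-mark PBRT like this one should also be able
+// to seed a fresh stream via 'resumeAfter', letting a client that stored only
+// the PBRT resume without ever having received an event. The cursor is killed
+// right away; it is opened purely to show the command shape.
+let resumeRes = assert.commandWorked(primaryDB.runCommand({
+    aggregate: collName,
+    pipeline: [{$changeStream: {resumeAfter: res.cursor.postBatchResumeToken}}],
+    cursor: {}
+}));
+assert.commandWorked(
+    primaryDB.runCommand({killCursors: collName, cursors: [resumeRes.cursor.id]}));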
+
+replTest.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/change_stream_speculative_majority_optimized_wait.js b/jstests/replsets/change_stream_speculative_majority_optimized_wait.js
index 20585c11336..65bd4599722 100644
--- a/jstests/replsets/change_stream_speculative_majority_optimized_wait.js
+++ b/jstests/replsets/change_stream_speculative_majority_optimized_wait.js
@@ -6,78 +6,78 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
- const name = "change_stream_speculative_majority";
- const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
+const name = "change_stream_speculative_majority";
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
- const dbName = name;
- const collName = "coll";
+const dbName = name;
+const collName = "coll";
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
- let primaryDB = primary.getDB(dbName);
- let primaryColl = primaryDB[collName];
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+let primaryDB = primary.getDB(dbName);
+let primaryColl = primaryDB[collName];
- // Receive 1 change to get an initial resume token.
- let res = assert.commandWorked(
- primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
- let cursorId = res.cursor.id;
- assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
- res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
- assert.eq(res.cursor.nextBatch.length, 1);
- let resumeToken = res.cursor.nextBatch[0]["_id"];
+// Receive 1 change to get an initial resume token.
+let res = assert.commandWorked(
+ primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
+let cursorId = res.cursor.id;
+assert.commandWorked(primaryColl.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+res = primary.getDB(dbName).runCommand({getMore: cursorId, collection: collName});
+assert.eq(res.cursor.nextBatch.length, 1);
+let resumeToken = res.cursor.nextBatch[0]["_id"];
- // Open a change stream.
- res = assert.commandWorked(
- primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
- cursorId = res.cursor.id;
+// Open a change stream.
+res = assert.commandWorked(
+ primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
+cursorId = res.cursor.id;
- // Insert documents to fill one batch and let them majority commit.
- let batchSize = 2;
- assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(primaryColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+// Insert documents to fill one batch and let them majority commit.
+let batchSize = 2;
+assert.commandWorked(primaryColl.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(primaryColl.insert({_id: 2}, {writeConcern: {w: "majority"}}));
- // Pause replication on the secondary so that writes won't majority commit.
- stopServerReplication(secondary);
+// Pause replication on the secondary so that writes won't majority commit.
+stopServerReplication(secondary);
-    // Do a write on primary that won't majority commit but will advance the last applied optime.
-    assert.commandWorked(primaryColl.insert({_id: 3}));
+// Do a write on primary that won't majority commit but will advance the last applied optime.
+assert.commandWorked(primaryColl.insert({_id: 3}));
- // Receive one batch of change events. We should be able to read only the majority committed
- // change events and no further in order to generate this batch.
- res = assert.commandWorked(primary.getDB(dbName).runCommand(
- {getMore: cursorId, collection: collName, batchSize: batchSize}));
- let changes = res.cursor.nextBatch;
- assert.eq(changes.length, 2);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
- assert.eq(changes[1]["fullDocument"], {_id: 2});
- assert.eq(changes[1]["operationType"], "insert");
+// Receive one batch of change events. We should be able to read only the majority committed
+// change events and no further in order to generate this batch.
+res = assert.commandWorked(primary.getDB(dbName).runCommand(
+ {getMore: cursorId, collection: collName, batchSize: batchSize}));
+let changes = res.cursor.nextBatch;
+assert.eq(changes.length, 2);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+assert.eq(changes[1]["fullDocument"], {_id: 2});
+assert.eq(changes[1]["operationType"], "insert");
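+
+// Side-effect-free illustration: the {_id: 3} write is already visible to plain
+// local reads on the primary even though the stream above could not return it;
+// that visible-but-not-returnable window is exactly what speculative majority
+// enforces.
+assert.eq(primaryColl.find({_id: 3}).itcount(), 1);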
- // Make sure that 'aggregate' commands also utilize the optimization.
- res = assert.commandWorked(primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {batchSize: batchSize}
- }));
- changes = res.cursor.firstBatch;
- assert.eq(changes.length, 2);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
- assert.eq(changes[1]["fullDocument"], {_id: 2});
- assert.eq(changes[1]["operationType"], "insert");
+// Make sure that 'aggregate' commands also utilize the optimization.
+res = assert.commandWorked(primaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {batchSize: batchSize}
+}));
+changes = res.cursor.firstBatch;
+assert.eq(changes.length, 2);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+assert.eq(changes[1]["fullDocument"], {_id: 2});
+assert.eq(changes[1]["operationType"], "insert");
- // Let the test finish.
- restartServerReplication(secondary);
+// Let the test finish.
+restartServerReplication(secondary);
- replTest.stopSet();
+replTest.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/change_stream_speculative_majority_rollback.js b/jstests/replsets/change_stream_speculative_majority_rollback.js
index 2c8aa9492af..06e4fccc51d 100644
--- a/jstests/replsets/change_stream_speculative_majority_rollback.js
+++ b/jstests/replsets/change_stream_speculative_majority_rollback.js
@@ -4,102 +4,100 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- 'use strict';
-
- load("jstests/replsets/libs/rollback_test.js"); // for RollbackTest.
-
- // Disable implicit sessions so it's easy to run commands from different threads.
- TestData.disableImplicitSessions = true;
-
- const name = "change_stream_speculative_majority_rollback";
- const dbName = name;
- const collName = "coll";
-
- // Set up a replica set for use in RollbackTest. We disable majority reads on all nodes so we
- // will utilize speculative majority reads for change streams.
- const replTest = new ReplSetTest({
- name,
- nodes: 3,
- useBridge: true,
- settings: {chainingAllowed: false},
- nodeOptions: {enableMajorityReadConcern: "false"}
- });
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- replTest.initiate(config);
-
- const rollbackTest = new RollbackTest(name, replTest);
- const primary = rollbackTest.getPrimary();
- const primaryDB = primary.getDB(dbName);
- let coll = primaryDB[collName];
-
- // Create a collection.
- assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
-
- // Open a change stream on the initial primary.
- let res =
- primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}});
- assert.commandWorked(res);
- let cursorId = res.cursor.id;
-
- // Receive an initial change event and save the resume token.
- assert.commandWorked(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- res = primaryDB.runCommand({getMore: cursorId, collection: collName});
- let changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
- let resumeToken = changes[0]["_id"];
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- assert.eq(rollbackNode, primary);
-
- // Insert a few items that will be rolled back.
- assert.commandWorked(coll.insert({_id: 2}));
- assert.commandWorked(coll.insert({_id: 3}));
- assert.commandWorked(coll.insert({_id: 4}));
-
- let getChangeEvent = new ScopedThread(function(host, cursorId, dbName, collName) {
- jsTestLog("Trying to receive change event from divergent primary.");
- const nodeDB = new Mongo(host).getDB(dbName);
- try {
- return nodeDB.runCommand({getMore: eval(cursorId), collection: collName});
- } catch (e) {
- return isNetworkError(e);
- }
- }, rollbackNode.host, tojson(cursorId), dbName, collName);
- getChangeEvent.start();
-
- // Make sure the change stream query started.
- assert.soon(() => primaryDB.currentOp({"command.getMore": cursorId}).inprog.length === 1);
-
- // Do some operations on the new primary that we can receive in a resumed stream.
- let syncSource = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- coll = syncSource.getDB(dbName)[collName];
- assert.commandWorked(coll.insert({_id: 5}));
- assert.commandWorked(coll.insert({_id: 6}));
- assert.commandWorked(coll.insert({_id: 7}));
-
- // Let rollback begin and complete.
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // The change stream query should have failed when the node entered rollback.
- assert(getChangeEvent.returnData());
-
- jsTestLog("Resuming change stream against new primary.");
- res = syncSource.getDB(dbName).runCommand(
- {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}});
- changes = res.cursor.firstBatch;
- assert.eq(changes.length, 3);
- assert.eq(changes[0]["fullDocument"], {_id: 5});
- assert.eq(changes[0]["operationType"], "insert");
- assert.eq(changes[1]["fullDocument"], {_id: 6});
- assert.eq(changes[1]["operationType"], "insert");
- assert.eq(changes[2]["fullDocument"], {_id: 7});
- assert.eq(changes[2]["operationType"], "insert");
-
- rollbackTest.stop();
-
+'use strict';
+
+load("jstests/replsets/libs/rollback_test.js"); // for RollbackTest.
+
+// Disable implicit sessions so it's easy to run commands from different threads.
+TestData.disableImplicitSessions = true;
+
+const name = "change_stream_speculative_majority_rollback";
+const dbName = name;
+const collName = "coll";
+
+// Set up a replica set for use in RollbackTest. We disable majority reads on all nodes so we
+// will utilize speculative majority reads for change streams.
+const replTest = new ReplSetTest({
+ name,
+ nodes: 3,
+ useBridge: true,
+ settings: {chainingAllowed: false},
+ nodeOptions: {enableMajorityReadConcern: "false"}
+});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+replTest.initiate(config);
+
+const rollbackTest = new RollbackTest(name, replTest);
+const primary = rollbackTest.getPrimary();
+const primaryDB = primary.getDB(dbName);
+let coll = primaryDB[collName];
+
+// Create a collection.
+assert.commandWorked(coll.insert({_id: 0}, {writeConcern: {w: "majority"}}));
+
+// Open a change stream on the initial primary.
+let res = primaryDB.runCommand({aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}});
+assert.commandWorked(res);
+let cursorId = res.cursor.id;
+
+// Receive an initial change event and save the resume token.
+assert.commandWorked(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+res = primaryDB.runCommand({getMore: cursorId, collection: collName});
+let changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+let resumeToken = changes[0]["_id"];
+
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+assert.eq(rollbackNode, primary);
+
+// Insert a few items that will be rolled back.
+assert.commandWorked(coll.insert({_id: 2}));
+assert.commandWorked(coll.insert({_id: 3}));
+assert.commandWorked(coll.insert({_id: 4}));
+
+let getChangeEvent = new ScopedThread(function(host, cursorId, dbName, collName) {
+ jsTestLog("Trying to receive change event from divergent primary.");
+ const nodeDB = new Mongo(host).getDB(dbName);
+ try {
+ return nodeDB.runCommand({getMore: eval(cursorId), collection: collName});
+ } catch (e) {
+ return isNetworkError(e);
+ }
+}, rollbackNode.host, tojson(cursorId), dbName, collName);
+getChangeEvent.start();
+
+// Make sure the change stream query started.
+assert.soon(() => primaryDB.currentOp({"command.getMore": cursorId}).inprog.length === 1);
+
+// Do some operations on the new primary that we can receive in a resumed stream.
+let syncSource = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+coll = syncSource.getDB(dbName)[collName];
+assert.commandWorked(coll.insert({_id: 5}));
+assert.commandWorked(coll.insert({_id: 6}));
+assert.commandWorked(coll.insert({_id: 7}));
+
+// Let rollback begin and complete.
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+// The change stream query should have failed when the node entered rollback.
+assert(getChangeEvent.returnData());
+
+jsTestLog("Resuming change stream against new primary.");
+res = syncSource.getDB(dbName).runCommand(
+ {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}});
+changes = res.cursor.firstBatch;
+assert.eq(changes.length, 3);
+assert.eq(changes[0]["fullDocument"], {_id: 5});
+assert.eq(changes[0]["operationType"], "insert");
+assert.eq(changes[1]["fullDocument"], {_id: 6});
+assert.eq(changes[1]["operationType"], "insert");
+assert.eq(changes[2]["fullDocument"], {_id: 7});
+assert.eq(changes[2]["operationType"], "insert");
+
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js b/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
index 4665009318a..29beca07a26 100644
--- a/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
+++ b/jstests/replsets/change_stream_speculative_majority_secondary_batch_application.js
@@ -6,71 +6,70 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
- load("jstests/libs/check_log.js"); // for checkLog.
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+load("jstests/libs/check_log.js"); // for checkLog.
- const name = "speculative_majority_secondary";
- const replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
+const name = "speculative_majority_secondary";
+const replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
- const dbName = name;
- const collName = "coll";
+const dbName = name;
+const collName = "coll";
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
- let primaryDB = primary.getDB(dbName);
- let primaryColl = primaryDB[collName];
- let secondaryDB = secondary.getDB(dbName);
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+let primaryDB = primary.getDB(dbName);
+let primaryColl = primaryDB[collName];
+let secondaryDB = secondary.getDB(dbName);
- // Do a couple writes on primary and save the first operation time, so we can start the
- // secondary change stream from this point.
- let res = assert.commandWorked(primaryColl.runCommand("insert", {documents: [{_id: 0}]}));
- let startTime = res.operationTime;
- assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 0}}));
- replTest.awaitLastOpCommitted();
+// Do a couple writes on primary and save the first operation time, so we can start the
+// secondary change stream from this point.
+let res = assert.commandWorked(primaryColl.runCommand("insert", {documents: [{_id: 0}]}));
+let startTime = res.operationTime;
+assert.commandWorked(primaryColl.update({_id: 0}, {$set: {v: 0}}));
+replTest.awaitLastOpCommitted();
- // Make the secondary pause after it has written a batch of entries to the oplog but before it
- // has applied them.
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "alwaysOn"}));
+// Make the secondary pause after it has written a batch of entries to the oplog but before it
+// has applied them.
+assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "alwaysOn"}));
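+
+// With this failpoint active, a batch's oplog entries become visible in the
+// secondary's local.oplog.rs while the collection data still reflects the
+// pre-batch state; that is precisely the window the change stream opened below
+// must not expose.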
- // Pause replication so that the secondary will sync and apply the set of writes from the
- // primary in a single batch.
- stopServerReplication(secondary);
+// Pause replication so that the secondary will sync and apply the set of writes from the
+// primary in a single batch.
+stopServerReplication(secondary);
- jsTestLog("Do some writes on the primary.");
- assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 1}}));
- assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 2}}));
- assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 3}}));
+jsTestLog("Do some writes on the primary.");
+assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 1}}));
+assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 2}}));
+assert.writeOK(primaryColl.update({_id: 0}, {$set: {v: 3}}));
- // Restart server replication on secondary and wait for the failpoint to be hit.
- jsTestLog("Restarting server replication on secondary.");
- restartServerReplication(secondary);
- checkLog.contains(secondary,
- "pauseBatchApplicationAfterWritingOplogEntries fail point enabled");
+// Restart server replication on secondary and wait for the failpoint to be hit.
+jsTestLog("Restarting server replication on secondary.");
+restartServerReplication(secondary);
+checkLog.contains(secondary, "pauseBatchApplicationAfterWritingOplogEntries fail point enabled");
- // Open a change stream on the secondary.
- res = assert.commandWorked(secondaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {startAtOperationTime: startTime}}],
- cursor: {}
- }));
+// Open a change stream on the secondary.
+res = assert.commandWorked(secondaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {startAtOperationTime: startTime}}],
+ cursor: {}
+}));
- // We should not expect to see any of the ops currently being applied in the secondary batch.
- let changes = res.cursor.firstBatch;
- assert.eq(changes.length, 2);
- assert.eq(changes[0].fullDocument, {_id: 0});
- assert.eq(changes[1].updateDescription.updatedFields, {v: 0});
+// We should not expect to see any of the ops currently being applied in the secondary batch.
+let changes = res.cursor.firstBatch;
+assert.eq(changes.length, 2);
+assert.eq(changes[0].fullDocument, {_id: 0});
+assert.eq(changes[1].updateDescription.updatedFields, {v: 0});
- // Turn off the failpoint and let the test complete.
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "off"}));
- replTest.stopSet();
+// Turn off the failpoint and let the test complete.
+assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "pauseBatchApplicationAfterWritingOplogEntries", mode: "off"}));
+replTest.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/change_stream_stepdown.js b/jstests/replsets/change_stream_stepdown.js
index 2df2a11c8c8..1a6f6fb28cb 100644
--- a/jstests/replsets/change_stream_stepdown.js
+++ b/jstests/replsets/change_stream_stepdown.js
@@ -5,134 +5,134 @@
* @tags: [requires_wiredtiger]
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
-
- const name = "change_stream_stepdown";
- const replTest = new ReplSetTest({name: name, nodes: [{}, {}]});
- replTest.startSet();
- replTest.initiate();
-
- const dbName = name;
- const collName = "change_stream_stepdown";
- const changeStreamComment = collName + "_comment";
-
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
- const primaryDb = primary.getDB(dbName);
- const secondaryDb = secondary.getDB(dbName);
- const primaryColl = primaryDb[collName];
-
- // Tell the secondary to stay secondary until we say otherwise.
- assert.commandWorked(secondaryDb.adminCommand({replSetFreeze: 999999}));
-
- // Open a change stream.
- let res = primaryDb.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {}}],
- cursor: {},
- comment: changeStreamComment,
- maxTimeMS: 5000
- });
- assert.commandWorked(res);
- let cursorId = res.cursor.id;
-
- // Insert several documents on primary and let them majority commit.
- assert.commandWorked(
- primaryColl.insert([{_id: 1}, {_id: 2}, {_id: 3}], {writeConcern: {w: "majority"}}));
- replTest.awaitReplication();
-
- jsTestLog("Testing that changestream survives stepdown between find and getmore");
- // Step down.
- assert.commandWorked(primaryDb.adminCommand({replSetStepDown: 60, force: true}));
- replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // Receive the first change event. This tests stepdown between find and getmore.
- res = assert.commandWorked(
- primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
- let changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 1});
- assert.eq(changes[0]["operationType"], "insert");
-
- jsTestLog("Testing that changestream survives step-up");
- // Step back up and wait for primary.
- assert.commandWorked(primaryDb.adminCommand({replSetFreeze: 0}));
- replTest.getPrimary();
-
-    // Get the next one. This tests that changestreams survive a step-up.
- res = assert.commandWorked(
- primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
- changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 2});
- assert.eq(changes[0]["operationType"], "insert");
-
- jsTestLog("Testing that changestream survives stepdown between two getmores");
- // Step down again.
- assert.commandWorked(primaryDb.adminCommand({replSetStepDown: 60, force: true}));
- replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
-
-    // Get the next one. This tests that changestreams survive a step down between getmores.
- res = assert.commandWorked(
- primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
- changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 3});
- assert.eq(changes[0]["operationType"], "insert");
-
- // Step back up and wait for primary.
- assert.commandWorked(primaryDb.adminCommand({replSetFreeze: 0}));
- replTest.getPrimary();
-
- jsTestLog("Testing that changestream waiting on old primary sees docs inserted on new primary");
-
- replTest.awaitReplication(); // Ensure secondary is up to date and can win an election.
- TestData.changeStreamComment = changeStreamComment;
- TestData.secondaryHost = secondary.host;
- TestData.dbName = dbName;
- TestData.collName = collName;
- let waitForShell = startParallelShell(function() {
- // Wait for the getMore to be in progress.
- assert.soon(
- () => db.getSiblingDB("admin")
- .aggregate([
- {'$currentOp': {}},
- {
- '$match': {
- op: 'getmore',
- 'cursor.originatingCommand.comment': TestData.changeStreamComment
- }
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+
+const name = "change_stream_stepdown";
+const replTest = new ReplSetTest({name: name, nodes: [{}, {}]});
+replTest.startSet();
+replTest.initiate();
+
+const dbName = name;
+const collName = "change_stream_stepdown";
+const changeStreamComment = collName + "_comment";
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+const primaryDb = primary.getDB(dbName);
+const secondaryDb = secondary.getDB(dbName);
+const primaryColl = primaryDb[collName];
+
+// Tell the secondary to stay secondary until we say otherwise.
+assert.commandWorked(secondaryDb.adminCommand({replSetFreeze: 999999}));
+
+// Open a change stream.
+let res = primaryDb.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ comment: changeStreamComment,
+ maxTimeMS: 5000
+});
+assert.commandWorked(res);
+let cursorId = res.cursor.id;
+
+// Insert several documents on primary and let them majority commit.
+assert.commandWorked(
+ primaryColl.insert([{_id: 1}, {_id: 2}, {_id: 3}], {writeConcern: {w: "majority"}}));
+replTest.awaitReplication();
+
+jsTestLog("Testing that changestream survives stepdown between find and getmore");
+// Step down.
+assert.commandWorked(primaryDb.adminCommand({replSetStepDown: 60, force: true}));
+replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+// Receive the first change event. This tests stepdown between find and getmore.
+res = assert.commandWorked(
+ primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
+let changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 1});
+assert.eq(changes[0]["operationType"], "insert");
+
+jsTestLog("Testing that changestream survives step-up");
+// Step back up and wait for primary.
+assert.commandWorked(primaryDb.adminCommand({replSetFreeze: 0}));
+replTest.getPrimary();
+
+// Get the next one. This tests that changestreams survive a step-up.
+res = assert.commandWorked(
+ primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
+changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 2});
+assert.eq(changes[0]["operationType"], "insert");
+
+jsTestLog("Testing that changestream survives stepdown between two getmores");
+// Step down again.
+assert.commandWorked(primaryDb.adminCommand({replSetStepDown: 60, force: true}));
+replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+// Get the next one. This tests that changestreams survive a step down between getmores.
+res = assert.commandWorked(
+ primaryDb.runCommand({getMore: cursorId, collection: collName, batchSize: 1}));
+changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 3});
+assert.eq(changes[0]["operationType"], "insert");
+
+// Step back up and wait for primary.
+assert.commandWorked(primaryDb.adminCommand({replSetFreeze: 0}));
+replTest.getPrimary();
+
+jsTestLog("Testing that changestream waiting on old primary sees docs inserted on new primary");
+
+replTest.awaitReplication(); // Ensure secondary is up to date and can win an election.
+TestData.changeStreamComment = changeStreamComment;
+TestData.secondaryHost = secondary.host;
+TestData.dbName = dbName;
+TestData.collName = collName;
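+
+// TestData is re-evaluated inside the shell started by startParallelShell, so
+// the plain values stashed above (host string and names) are visible in the
+// function below; live objects such as connections or cursors would not
+// survive the trip.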
+let waitForShell = startParallelShell(function() {
+ // Wait for the getMore to be in progress.
+ assert.soon(
+ () => db.getSiblingDB("admin")
+ .aggregate([
+ {'$currentOp': {}},
+ {
+ '$match': {
+ op: 'getmore',
+ 'cursor.originatingCommand.comment': TestData.changeStreamComment
}
- ])
- .itcount() == 1);
-
- const secondary = new Mongo(TestData.secondaryHost);
- const secondaryDb = secondary.getDB(TestData.dbName);
- // Step down the old primary and wait for new primary.
- assert.commandWorked(secondaryDb.adminCommand({replSetFreeze: 0}));
- assert.commandWorked(secondaryDb.adminCommand({replSetStepUp: 1, skipDryRun: true}));
- jsTestLog("Waiting for new primary");
- assert.soon(() => secondaryDb.adminCommand({isMaster: 1}).ismaster);
-
- jsTestLog("Inserting document on new primary");
-        assert.commandWorked(
-            secondaryDb[TestData.collName].insert({_id: 4}, {writeConcern: {w: "majority"}}));
- }, primary.port);
-
- res = assert.commandWorked(primaryDb.runCommand({
- getMore: cursorId,
- collection: collName,
- batchSize: 1,
- maxTimeMS: ReplSetTest.kDefaultTimeoutMS
- }));
- changes = res.cursor.nextBatch;
- assert.eq(changes.length, 1);
- assert.eq(changes[0]["fullDocument"], {_id: 4});
- assert.eq(changes[0]["operationType"], "insert");
-
- waitForShell();
-
- replTest.stopSet();
+ }
+ ])
+ .itcount() == 1);
+
+ const secondary = new Mongo(TestData.secondaryHost);
+ const secondaryDb = secondary.getDB(TestData.dbName);
+ // Step down the old primary and wait for new primary.
+ assert.commandWorked(secondaryDb.adminCommand({replSetFreeze: 0}));
+ assert.commandWorked(secondaryDb.adminCommand({replSetStepUp: 1, skipDryRun: true}));
+ jsTestLog("Waiting for new primary");
+ assert.soon(() => secondaryDb.adminCommand({isMaster: 1}).ismaster);
+
+ jsTestLog("Inserting document on new primary");
+    assert.commandWorked(
+        secondaryDb[TestData.collName].insert({_id: 4}, {writeConcern: {w: "majority"}}));
+}, primary.port);
+
+res = assert.commandWorked(primaryDb.runCommand({
+ getMore: cursorId,
+ collection: collName,
+ batchSize: 1,
+ maxTimeMS: ReplSetTest.kDefaultTimeoutMS
+}));
+changes = res.cursor.nextBatch;
+assert.eq(changes.length, 1);
+assert.eq(changes[0]["fullDocument"], {_id: 4});
+assert.eq(changes[0]["operationType"], "insert");
+
+waitForShell();
+
+replTest.stopSet();
})();
diff --git a/jstests/replsets/clean_shutdown_oplog_state.js b/jstests/replsets/clean_shutdown_oplog_state.js
index 0bc4855f99a..35957ed44b3 100644
--- a/jstests/replsets/clean_shutdown_oplog_state.js
+++ b/jstests/replsets/clean_shutdown_oplog_state.js
@@ -5,101 +5,97 @@
//
// @tags: [requires_persistence, requires_majority_read_concern]
(function() {
- "use strict";
+"use strict";
- // Skip db hash check because secondary restarted as standalone.
- TestData.skipCheckDBHashes = true;
+// Skip db hash check because secondary restarted as standalone.
+TestData.skipCheckDBHashes = true;
- var rst = new ReplSetTest({
- name: "name",
- nodes: 2,
- oplogSize: 500,
- });
+var rst = new ReplSetTest({
+ name: "name",
+ nodes: 2,
+ oplogSize: 500,
+});
- rst.startSet();
- var conf = rst.getReplSetConfig();
- conf.members[1].votes = 0;
- conf.members[1].priority = 0;
- printjson(conf);
- rst.initiate(conf);
+rst.startSet();
+var conf = rst.getReplSetConfig();
+conf.members[1].votes = 0;
+conf.members[1].priority = 0;
+printjson(conf);
+rst.initiate(conf);
- var primary = rst.getPrimary(); // Waits for PRIMARY state.
- var slave = rst.nodes[1];
+var primary = rst.getPrimary(); // Waits for PRIMARY state.
+var slave = rst.nodes[1];
- // Stop replication on the secondary.
- assert.commandWorked(
- slave.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+// Stop replication on the secondary.
+assert.commandWorked(slave.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
- // Prime the main collection.
- primary.getCollection("test.coll").insert({_id: -1});
+// Prime the main collection.
+primary.getCollection("test.coll").insert({_id: -1});
- // Start a w:2 write that will block until replication is resumed.
- var waitForReplStart = startParallelShell(function() {
- printjson(assert.writeOK(
- db.getCollection('side').insert({}, {writeConcern: {w: 2, wtimeout: 30 * 60 * 1000}})));
- }, primary.host.split(':')[1]);
+// Start a w:2 write that will block until replication is resumed.
+var waitForReplStart = startParallelShell(function() {
+ printjson(assert.writeOK(
+ db.getCollection('side').insert({}, {writeConcern: {w: 2, wtimeout: 30 * 60 * 1000}})));
+}, primary.host.split(':')[1]);
- // Insert a lot of data in increasing order to test.coll.
- var op = primary.getCollection("test.coll").initializeUnorderedBulkOp();
- for (var i = 0; i < 1000 * 1000; i++) {
- op.insert({_id: i});
- }
- assert.writeOK(op.execute());
+// Insert a lot of data in increasing order to test.coll.
+var op = primary.getCollection("test.coll").initializeUnorderedBulkOp();
+for (var i = 0; i < 1000 * 1000; i++) {
+ op.insert({_id: i});
+}
+assert.writeOK(op.execute());
- // Resume replication and wait for ops to start replicating, then do a clean shutdown on the
- // secondary.
- assert.commandWorked(slave.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- waitForReplStart();
- sleep(100); // wait a bit to increase the chances of killing mid-batch.
- rst.stop(1);
+// Resume replication and wait for ops to start replicating, then do a clean shutdown on the
+// secondary.
+assert.commandWorked(slave.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+waitForReplStart();
+sleep(100); // wait a bit to increase the chances of killing mid-batch.
+rst.stop(1);
- // Restart the secondary as a standalone node.
- var options = slave.savedOptions;
- options.noCleanData = true;
- delete options.replSet;
+// Restart the secondary as a standalone node.
+var options = slave.savedOptions;
+options.noCleanData = true;
+delete options.replSet;
- var storageEngine = jsTest.options().storageEngine || "wiredTiger";
- if (storageEngine === "wiredTiger") {
- options.setParameter = options.setParameter || {};
- options.setParameter.recoverFromOplogAsStandalone = true;
- }
+var storageEngine = jsTest.options().storageEngine || "wiredTiger";
+if (storageEngine === "wiredTiger") {
+ options.setParameter = options.setParameter || {};
+ options.setParameter.recoverFromOplogAsStandalone = true;
+}
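+
+// 'recoverFromOplogAsStandalone' makes the standalone startup replay any oplog
+// entries ahead of the last stable checkpoint, so the node surfaces everything
+// it had written at shutdown; without it, a WiredTiger standalone would expose
+// only its last checkpoint and the oplog-versus-collection comparison below
+// would not be meaningful.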
- var conn = MongoRunner.runMongod(options);
- assert.neq(null, conn, "secondary failed to start");
+var conn = MongoRunner.runMongod(options);
+assert.neq(null, conn, "secondary failed to start");
- // Following clean shutdown of a node, the oplog must exactly match the applied operations.
- // Additionally, the begin field must not be in the minValid document, the ts must match the
- // top of the oplog (SERVER-25353), and the oplogTruncateAfterPoint must be null (SERVER-7200
- // and SERVER-25071).
- var oplogDoc = conn.getCollection('local.oplog.rs')
- .find({ns: 'test.coll'})
- .sort({$natural: -1})
- .limit(1)[0];
- var collDoc = conn.getCollection('test.coll').find().sort({_id: -1}).limit(1)[0];
- var minValidDoc =
- conn.getCollection('local.replset.minvalid').find().sort({$natural: -1}).limit(1)[0];
- var oplogTruncateAfterPointDoc =
- conn.getCollection('local.replset.oplogTruncateAfterPoint').find().limit(1)[0];
- printjson({
- oplogDoc: oplogDoc,
- collDoc: collDoc,
- minValidDoc: minValidDoc,
- oplogTruncateAfterPointDoc: oplogTruncateAfterPointDoc
- });
- try {
- assert.eq(collDoc._id, oplogDoc.o._id);
- assert(!('begin' in minValidDoc), 'begin in minValidDoc');
- if (storageEngine !== "wiredTiger") {
- assert.eq(minValidDoc.ts, oplogDoc.ts);
- }
- assert.eq(oplogTruncateAfterPointDoc.oplogTruncateAfterPoint, Timestamp());
- } catch (e) {
- // TODO remove once SERVER-25777 is resolved.
- jsTest.log(
- "Look above and make sure clean shutdown finished without resorting to SIGKILL." +
- "\nUnfortunately that currently doesn't fail the test.");
- throw e;
+// Following clean shutdown of a node, the oplog must exactly match the applied operations.
+// Additionally, the begin field must not be in the minValid document, the ts must match the
+// top of the oplog (SERVER-25353), and the oplogTruncateAfterPoint must be null (SERVER-7200
+// and SERVER-25071).
+var oplogDoc =
+ conn.getCollection('local.oplog.rs').find({ns: 'test.coll'}).sort({$natural: -1}).limit(1)[0];
+var collDoc = conn.getCollection('test.coll').find().sort({_id: -1}).limit(1)[0];
+var minValidDoc =
+ conn.getCollection('local.replset.minvalid').find().sort({$natural: -1}).limit(1)[0];
+var oplogTruncateAfterPointDoc =
+ conn.getCollection('local.replset.oplogTruncateAfterPoint').find().limit(1)[0];
+printjson({
+ oplogDoc: oplogDoc,
+ collDoc: collDoc,
+ minValidDoc: minValidDoc,
+ oplogTruncateAfterPointDoc: oplogTruncateAfterPointDoc
+});
+try {
+ assert.eq(collDoc._id, oplogDoc.o._id);
+ assert(!('begin' in minValidDoc), 'begin in minValidDoc');
+ if (storageEngine !== "wiredTiger") {
+ assert.eq(minValidDoc.ts, oplogDoc.ts);
}
+ assert.eq(oplogTruncateAfterPointDoc.oplogTruncateAfterPoint, Timestamp());
+} catch (e) {
+ // TODO remove once SERVER-25777 is resolved.
+ jsTest.log("Look above and make sure clean shutdown finished without resorting to SIGKILL." +
+ "\nUnfortunately that currently doesn't fail the test.");
+ throw e;
+}
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/collate_id.js b/jstests/replsets/collate_id.js
index 6e6d56bc67d..588c02e979a 100644
--- a/jstests/replsets/collate_id.js
+++ b/jstests/replsets/collate_id.js
@@ -1,66 +1,68 @@
// Test that oplog application on the secondary happens correctly when the collection has a default
// collation and operations by _id which must respect the collation are issued.
(function() {
- "use strict";
+"use strict";
- Random.setRandomSeed();
+Random.setRandomSeed();
-    // Return a copy of 'str' with the character at index 'i' replaced by 'character'.
- function replaceChar(str, i, character) {
- assert.eq(1, character.length);
- return str.substr(0, i) + character + str.substr(i + 1);
- }
+// Return a copy of 'str' with the character at index 'i' replaced by 'character'.
+function replaceChar(str, i, character) {
+ assert.eq(1, character.length);
+ return str.substr(0, i) + character + str.substr(i + 1);
+}
- // Return a string whose character at index 'i' has been uppercased.
- function uppercaseIth(str, i) {
- return replaceChar(str, i, str[i].toUpperCase());
- }
+// Return a string whose character at index 'i' has been uppercased.
+function uppercaseIth(str, i) {
+ return replaceChar(str, i, str[i].toUpperCase());
+}
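+
+// For example, uppercaseIth("abc", 1) returns "aBc". The test uses this to
+// build _id strings that differ only by case and therefore collide under the
+// strength-2 (case-insensitive) collation declared below.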
- const caseInsensitive = {collation: {locale: "en_US", strength: 2}};
+const caseInsensitive = {
+ collation: {locale: "en_US", strength: 2}
+};
- var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
- var nodes = replTest.startSet();
- replTest.initiate();
+var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
+var nodes = replTest.startSet();
+replTest.initiate();
- var primary = replTest.getPrimary();
- var primaryDB = primary.getDB("test");
- var primaryColl = primaryDB.collate_id;
+var primary = replTest.getPrimary();
+var primaryDB = primary.getDB("test");
+var primaryColl = primaryDB.collate_id;
- var secondary = replTest.getSecondary();
- var secondaryDB = secondary.getDB("test");
- var secondaryColl = secondaryDB.collate_id;
+var secondary = replTest.getSecondary();
+var secondaryDB = secondary.getDB("test");
+var secondaryColl = secondaryDB.collate_id;
- // Stop the secondary from syncing. This will ensure that the writes on the primary get applied
- // on the secondary in a large batch.
- assert.commandWorked(
- secondaryDB.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+// Stop the secondary from syncing. This will ensure that the writes on the primary get applied
+// on the secondary in a large batch.
+assert.commandWorked(
+ secondaryDB.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- assert.commandWorked(primaryDB.createCollection(primaryColl.getName(), caseInsensitive));
+assert.commandWorked(primaryDB.createCollection(primaryColl.getName(), caseInsensitive));
- // A string of the character 'b' repeated.
- const baseStr = new Array(50).join("b");
+// A string of the character 'b' repeated.
+const baseStr = new Array(50).join("b");
- for (var i = 0; i < 1000; i++) {
- // Make an _id by uppercasing each character in "baseStr" with 0.5 probability.
- var strId = baseStr;
- for (var charIdx = 0; charIdx < baseStr.length; charIdx++) {
- if (Random.rand() < 0.5) {
- strId = uppercaseIth(strId, charIdx);
- }
+for (var i = 0; i < 1000; i++) {
+ // Make an _id by uppercasing each character in "baseStr" with 0.5 probability.
+ var strId = baseStr;
+ for (var charIdx = 0; charIdx < baseStr.length; charIdx++) {
+ if (Random.rand() < 0.5) {
+ strId = uppercaseIth(strId, charIdx);
}
-
- assert.writeOK(primaryColl.insert({_id: strId}));
- assert.writeOK(primaryColl.remove({_id: strId}));
}
- // Since the inserts and deletes happen in pairs, we should be left with an empty collection on
- // the primary.
- assert.eq(0, primaryColl.find().itcount());
+ assert.writeOK(primaryColl.insert({_id: strId}));
+ assert.writeOK(primaryColl.remove({_id: strId}));
+}
+
+// Since the inserts and deletes happen in pairs, we should be left with an empty collection on
+// the primary.
+assert.eq(0, primaryColl.find().itcount());
- // Allow the secondary to sync, and test that it also ends up with an empty collection.
- assert.commandWorked(
- secondaryDB.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
- replTest.awaitReplication();
- assert.eq(0, secondaryColl.find().itcount());
- replTest.stopSet();
+// Allow the secondary to sync, and test that it also ends up with an empty collection.
+assert.commandWorked(
+ secondaryDB.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
+replTest.awaitReplication();
+assert.eq(0, secondaryColl.find().itcount());
+replTest.stopSet();
})();
diff --git a/jstests/replsets/command_response_operation_time.js b/jstests/replsets/command_response_operation_time.js
index d4aecdb2248..a9ae4d6ef70 100644
--- a/jstests/replsets/command_response_operation_time.js
+++ b/jstests/replsets/command_response_operation_time.js
@@ -5,58 +5,57 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- function assertCorrectOperationTime(operationTime, expectedTimestamp, opTimeType) {
- assert.eq(0,
- timestampCmp(operationTime, expectedTimestamp),
- "operationTime in command response, " + operationTime +
- ", does not equal the last " + opTimeType + " timestamp, " +
- expectedTimestamp);
- }
-
- var name = "command_response_operation_time";
-
- var replTest = new ReplSetTest(
- {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ""}, waitForKeys: true});
-
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- replTest.stopSet();
- return;
- }
- replTest.initiate();
-
- var res, statusRes;
- var testDB = replTest.getPrimary().getDB(name);
-
- jsTestLog("Executing majority write.");
- res = assert.commandWorked(
- testDB.runCommand({insert: "foo", documents: [{x: 1}], writeConcern: {w: "majority"}}));
- statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
- assertCorrectOperationTime(
- res.operationTime, statusRes.optimes.lastCommittedOpTime.ts, "committed");
-
- jsTestLog("Executing local write.");
- res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 2}]}));
- statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
- assertCorrectOperationTime(res.operationTime, statusRes.optimes.appliedOpTime.ts, "applied");
-
- replTest.awaitLastOpCommitted();
-
- jsTestLog("Executing majority read.");
- res = assert.commandWorked(
- testDB.runCommand({find: "foo", filter: {x: 1}, readConcern: {level: "majority"}}));
- statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
- assertCorrectOperationTime(
- res.operationTime, statusRes.optimes.lastCommittedOpTime.ts, "committed");
-
- jsTestLog("Executing local read.");
- res = assert.commandWorked(testDB.runCommand({find: "foo", filter: {x: 1}}));
- statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
- assertCorrectOperationTime(res.operationTime, statusRes.optimes.appliedOpTime.ts, "applied");
+"use strict";
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+function assertCorrectOperationTime(operationTime, expectedTimestamp, opTimeType) {
+ assert.eq(0,
+ timestampCmp(operationTime, expectedTimestamp),
+ "operationTime in command response, " + operationTime + ", does not equal the last " +
+ opTimeType + " timestamp, " + expectedTimestamp);
+}
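+
+// timestampCmp is a three-way Timestamp comparator (negative/zero/positive), so
+// the assert.eq(0, ...) above checks that the reported operationTime equals the
+// expected optime exactly.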
+
+var name = "command_response_operation_time";
+
+var replTest = new ReplSetTest(
+ {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ""}, waitForKeys: true});
+
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
replTest.stopSet();
+ return;
+}
+replTest.initiate();
+
+var res, statusRes;
+var testDB = replTest.getPrimary().getDB(name);
+
+jsTestLog("Executing majority write.");
+res = assert.commandWorked(
+ testDB.runCommand({insert: "foo", documents: [{x: 1}], writeConcern: {w: "majority"}}));
+statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
+assertCorrectOperationTime(
+ res.operationTime, statusRes.optimes.lastCommittedOpTime.ts, "committed");
+
+jsTestLog("Executing local write.");
+res = assert.commandWorked(testDB.runCommand({insert: "foo", documents: [{x: 2}]}));
+statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
+assertCorrectOperationTime(res.operationTime, statusRes.optimes.appliedOpTime.ts, "applied");
+
+replTest.awaitLastOpCommitted();
+
+jsTestLog("Executing majority read.");
+res = assert.commandWorked(
+ testDB.runCommand({find: "foo", filter: {x: 1}, readConcern: {level: "majority"}}));
+statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
+assertCorrectOperationTime(
+ res.operationTime, statusRes.optimes.lastCommittedOpTime.ts, "committed");
+
+jsTestLog("Executing local read.");
+res = assert.commandWorked(testDB.runCommand({find: "foo", filter: {x: 1}}));
+statusRes = assert.commandWorked(testDB.adminCommand({replSetGetStatus: 1}));
+assertCorrectOperationTime(res.operationTime, statusRes.optimes.appliedOpTime.ts, "applied");
+
+replTest.stopSet();
})();
diff --git a/jstests/replsets/commands_that_write_accept_wc.js b/jstests/replsets/commands_that_write_accept_wc.js
index e99ef63ddba..4a35cb747f1 100644
--- a/jstests/replsets/commands_that_write_accept_wc.js
+++ b/jstests/replsets/commands_that_write_accept_wc.js
@@ -7,186 +7,186 @@
*/
(function() {
- "use strict";
- var replTest = new ReplSetTest({
- name: 'WCSet',
- // Set priority of secondaries to zero to prevent spurious elections.
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- var dbName = "wc-test";
- var db = master.getDB(dbName);
- var collName = 'leaves';
- var coll = db[collName];
-
- function dropTestCollection() {
- replTest.awaitReplication();
- coll.drop();
- assert.eq(0, coll.find().itcount(), "test collection not empty");
+"use strict";
+var replTest = new ReplSetTest({
+ name: 'WCSet',
+ // Set priority of secondaries to zero to prevent spurious elections.
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+});
+replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+var dbName = "wc-test";
+var db = master.getDB(dbName);
+var collName = 'leaves';
+var coll = db[collName];
+
+function dropTestCollection() {
+ replTest.awaitReplication();
+ coll.drop();
+ assert.eq(0, coll.find().itcount(), "test collection not empty");
+}
+
+dropTestCollection();
+
+var commands = [];
+
+commands.push({
+ req: {insert: collName, documents: [{type: 'maple'}]},
+ setupFunc: function() {},
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'maple'}), 1);
}
-
- dropTestCollection();
-
- var commands = [];
-
- commands.push({
- req: {insert: collName, documents: [{type: 'maple'}]},
- setupFunc: function() {},
- confirmFunc: function() {
- assert.eq(coll.count({type: 'maple'}), 1);
- }
- });
-
- commands.push({
- req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.getIndexes().length, 1);
- },
- confirmFunc: function() {
- assert.eq(coll.getIndexes().length, 2);
- }
- });
-
- commands.push({
- req: {
- update: collName,
- updates: [{
- q: {type: 'oak'},
- u: [{$set: {type: 'ginkgo'}}],
- }],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: {$set: {type: 'ginkgo'}},
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {
- findAndModify: collName,
- query: {type: 'oak'},
- update: [{$set: {type: 'ginkgo'}}],
- writeConcern: {w: 'majority'}
- },
- setupFunc: function() {
- coll.insert({type: 'oak'});
- assert.eq(coll.count({type: 'ginkgo'}), 0);
- assert.eq(coll.count({type: 'oak'}), 1);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'ginkgo'}), 1);
- assert.eq(coll.count({type: 'oak'}), 0);
- }
- });
-
- commands.push({
- req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1, type: "willow"}}]},
- setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
- assert.eq(coll.count({type: 'willow'}), 0);
- },
- confirmFunc: function() {
- assert.eq(coll.count({type: 'willow'}), 1);
- }
- });
-
- commands.push({
- req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
- setupFunc: function() {
- coll.insert({_id: 1, type: 'oak'});
- coll.insert({_id: 2, type: 'maple'});
- },
- confirmFunc: function() {
- assert.eq(db.foo.count({type: 'oak'}), 1);
- assert.eq(db.foo.count({type: 'maple'}), 1);
- db.foo.drop();
- }
- });
-
- commands.push({
- req: {
- mapReduce: collName,
- map: function() {
- this.tags.forEach(function(z) {
- emit(z, 1);
- });
- },
- reduce: function(key, values) {
- return {count: values.length};
- },
- out: "foo"
+});
+
+commands.push({
+ req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]},
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.getIndexes().length, 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.getIndexes().length, 2);
+ }
+});
+
+commands.push({
+ req: {
+ update: collName,
+ updates: [{
+ q: {type: 'oak'},
+ u: [{$set: {type: 'ginkgo'}}],
+ }],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: {$set: {type: 'ginkgo'}},
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {
+ findAndModify: collName,
+ query: {type: 'oak'},
+ update: [{$set: {type: 'ginkgo'}}],
+ writeConcern: {w: 'majority'}
+ },
+ setupFunc: function() {
+ coll.insert({type: 'oak'});
+ assert.eq(coll.count({type: 'ginkgo'}), 0);
+ assert.eq(coll.count({type: 'oak'}), 1);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'ginkgo'}), 1);
+ assert.eq(coll.count({type: 'oak'}), 0);
+ }
+});
+
+commands.push({
+ req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1, type: "willow"}}]},
+ setupFunc: function() {
+ coll.insert({_id: 1, type: 'oak'});
+ assert.eq(coll.count({type: 'willow'}), 0);
+ },
+ confirmFunc: function() {
+ assert.eq(coll.count({type: 'willow'}), 1);
+ }
+});
+
+commands.push({
+ req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}},
+ setupFunc: function() {
+ coll.insert({_id: 1, type: 'oak'});
+ coll.insert({_id: 2, type: 'maple'});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.count({type: 'oak'}), 1);
+ assert.eq(db.foo.count({type: 'maple'}), 1);
+ db.foo.drop();
+ }
+});
+
+commands.push({
+ req: {
+ mapReduce: collName,
+ map: function() {
+ this.tags.forEach(function(z) {
+ emit(z, 1);
+ });
},
- setupFunc: function() {
- coll.insert({x: 1, tags: ["a", "b"]});
- coll.insert({x: 2, tags: ["b", "c"]});
- coll.insert({x: 3, tags: ["c", "a"]});
- coll.insert({x: 4, tags: ["b", "c"]});
+ reduce: function(key, values) {
+ return {count: values.length};
},
- confirmFunc: function() {
- assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2);
- assert.eq(db.foo.findOne({_id: 'b'}).value.count, 3);
- assert.eq(db.foo.findOne({_id: 'c'}).value.count, 3);
- db.foo.drop();
- }
- });
-
- function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS};
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropTestCollection();
- cmd.setupFunc();
- var res = db.runCommand(cmd.req);
- assert.commandWorked(res);
- assert(!res.writeConcernError,
- 'command on a full replicaset had writeConcernError: ' + tojson(res));
- cmd.confirmFunc();
+ out: "foo"
+ },
+ setupFunc: function() {
+ coll.insert({x: 1, tags: ["a", "b"]});
+ coll.insert({x: 2, tags: ["b", "c"]});
+ coll.insert({x: 3, tags: ["c", "a"]});
+ coll.insert({x: 4, tags: ["b", "c"]});
+ },
+ confirmFunc: function() {
+ assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2);
+ assert.eq(db.foo.findOne({_id: 'b'}).value.count, 3);
+ assert.eq(db.foo.findOne({_id: 'c'}).value.count, 3);
+ db.foo.drop();
}
+});
- function testInvalidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 'invalid'};
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropTestCollection();
- cmd.setupFunc();
- var res = coll.runCommand(cmd.req);
- assert.commandFailedWithCode(res, ErrorCodes.UnknownReplWriteConcern);
- cmd.confirmFunc();
- }
+function testValidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS};
+ jsTest.log("Testing " + tojson(cmd.req));
- commands.forEach(function(cmd) {
- testValidWriteConcern(cmd);
- testInvalidWriteConcern(cmd);
- });
+ dropTestCollection();
+ cmd.setupFunc();
+ var res = db.runCommand(cmd.req);
+ assert.commandWorked(res);
+ assert(!res.writeConcernError,
+           'command on a full replica set had writeConcernError: ' + tojson(res));
+ cmd.confirmFunc();
+}
+
+function testInvalidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 'invalid'};
+ jsTest.log("Testing " + tojson(cmd.req));
- replTest.stopSet();
+ dropTestCollection();
+ cmd.setupFunc();
+ var res = coll.runCommand(cmd.req);
+ assert.commandFailedWithCode(res, ErrorCodes.UnknownReplWriteConcern);
+ cmd.confirmFunc();
+}
+
+commands.forEach(function(cmd) {
+ testValidWriteConcern(cmd);
+ testInvalidWriteConcern(cmd);
+});
+
+replTest.stopSet();
})();
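
The harness above drives each entry in `commands` through both a valid and an invalid write concern. A condensed sketch of the same descriptor pattern, using a hypothetical collection name and a single entry:

    // One descriptor bundles the request with setup/confirm hooks, so a single
    // loop can exercise many different write commands. "trees" is illustrative.
    const sketchCommands = [{
        req: {insert: "trees", documents: [{type: "maple"}]},
        setupFunc: function() {},
        confirmFunc: function() {
            assert.eq(db.trees.count({type: "maple"}), 1);
        }
    }];

    sketchCommands.forEach(function(cmd) {
        cmd.req.writeConcern = {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS};
        cmd.setupFunc();
        const res = assert.commandWorked(db.runCommand(cmd.req));
        assert(!res.writeConcernError, "unexpected writeConcernError: " + tojson(res));
        cmd.confirmFunc();
    });
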
diff --git a/jstests/replsets/commit_prepared_transaction_before_stable_timestamp.js b/jstests/replsets/commit_prepared_transaction_before_stable_timestamp.js
index 71ca1189b45..56b156d8289 100644
--- a/jstests/replsets/commit_prepared_transaction_before_stable_timestamp.js
+++ b/jstests/replsets/commit_prepared_transaction_before_stable_timestamp.js
@@ -5,55 +5,55 @@
*/
(function() {
- "use strict";
- load("jstests/aggregation/extras/utils.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/aggregation/extras/utils.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
+const primary = replTest.getPrimary();
- const dbName = "test";
- const collName = "commit_prepared_transaction_before_stable_timestamp";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "commit_prepared_transaction_before_stable_timestamp";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testDB.runCommand({create: collName}));
- // Make sure there is no lag between the oldest timestamp and the stable timestamp so we can
- // test that committing a prepared transaction behind the oldest timestamp succeeds.
- assert.commandWorked(primary.adminCommand({
- "configureFailPoint": 'WTSetOldestTSToStableTS',
- "mode": 'alwaysOn',
- }));
+// Make sure there is no lag between the oldest timestamp and the stable timestamp so we can
+// test that committing a prepared transaction behind the oldest timestamp succeeds.
+assert.commandWorked(primary.adminCommand({
+ "configureFailPoint": 'WTSetOldestTSToStableTS',
+ "mode": 'alwaysOn',
+}));
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
- // Doing a majority write after preparing the transaction ensures that the stable timestamp is
- // past the prepare timestamp because this write must be in the committed snapshot.
- assert.commandWorked(
- testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
+jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
+// Doing a majority write after preparing the transaction ensures that the stable timestamp is
+// past the prepare timestamp because this write must be in the committed snapshot.
+assert.commandWorked(
+ testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
- jsTestLog("Committing the transaction before the stable timestamp");
+jsTestLog("Committing the transaction before the stable timestamp");
- // Since we have advanced the stableTimestamp to be after the prepareTimestamp, when we commit
- // at the prepareTimestamp, we are certain that we are committing behind the stableTimestamp.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+// Since we have advanced the stableTimestamp to be after the prepareTimestamp, when we commit
+// at the prepareTimestamp, we are certain that we are committing behind the stableTimestamp.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- // Make sure we can see the insert from the prepared transaction.
- arrayEq(sessionColl.find().toArray(), [{_id: 1}, {_id: 2}]);
+// Make sure we can see the insert from the prepared transaction.
+arrayEq(sessionColl.find().toArray(), [{_id: 1}, {_id: 2}]);
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'WTSetOldestTSToStableTS', mode: 'off'}));
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'WTSetOldestTSToStableTS', mode: 'off'}));
- replTest.stopSet();
+replTest.stopSet();
}());
\ No newline at end of file
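
The WTSetOldestTSToStableTS toggling above follows the usual failpoint lifecycle. A generic sketch of that lifecycle, with a placeholder failpoint name and an assumed connection `conn`:

    // Enable the failpoint, exercise the gated code path, then always turn the
    // failpoint back off so later tests are unaffected. "someFailPoint" is a
    // placeholder, not a real failpoint name.
    assert.commandWorked(
        conn.adminCommand({configureFailPoint: "someFailPoint", mode: "alwaysOn"}));
    try {
        // ... run the operations whose behavior the failpoint changes ...
    } finally {
        assert.commandWorked(
            conn.adminCommand({configureFailPoint: "someFailPoint", mode: "off"}));
    }
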
diff --git a/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js b/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js
index 12c02f9d642..606e3bc5019 100644
--- a/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js
+++ b/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js
@@ -13,88 +13,88 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
- const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const dbName = "test";
- const collName = "commit_transaction_initial_sync_data_already_applied";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "commit_transaction_initial_sync_data_already_applied";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testColl.insert({_id: 1, a: 0}));
+assert.commandWorked(testColl.insert({_id: 1, a: 0}));
- // Ensure that the "a" field is unique
- assert.commandWorked(testColl.createIndex({"a": 1}, {unique: true}));
+// Ensure that the "a" field is unique
+assert.commandWorked(testColl.createIndex({"a": 1}, {unique: true}));
- jsTestLog("Restarting the secondary");
+jsTestLog("Restarting the secondary");
- // Restart the secondary with startClean set to true so that it goes through initial sync. Also
- // restart the node with a failpoint turned on that will pause initial sync before cloning any
- // collections, but during the period that the sync source is fetching oplog entries from the
- // sync source. This will make it so that all operations after this and before the failpoint is
- // turned off will be reflected in the data but also applied during the oplog application phase
- // of initial sync.
- secondary = replTest.restart(secondary, {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangBeforeCopyingDatabases': tojson({mode: 'alwaysOn'}),
- 'numInitialSyncAttempts': 1
- }
- });
+// Restart the secondary with startClean set to true so that it goes through initial sync. Also
+// restart the node with a failpoint turned on that will pause initial sync before cloning any
+// collections, but while the node is fetching oplog entries from its sync source. This ensures
+// that every operation performed after this point and before the failpoint is turned off is
+// both reflected in the cloned data and applied again during the oplog application phase of
+// initial sync.
+secondary = replTest.restart(secondary, {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangBeforeCopyingDatabases': tojson({mode: 'alwaysOn'}),
+ 'numInitialSyncAttempts': 1
+ }
+});
- // Wait for fail point message to be logged so that we know that initial sync is paused.
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+// Wait for fail point message to be logged so that we know that initial sync is paused.
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
- jsTestLog("Initial sync paused");
+jsTestLog("Initial sync paused");
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- assert.commandWorked(testColl.update({_id: 1}, {_id: 1, a: 0, b: 0}));
+assert.commandWorked(testColl.update({_id: 1}, {_id: 1, a: 0, b: 0}));
- session.startTransaction();
+session.startTransaction();
- // When the commitTransaction oplog entry is applied, this operation should fail with a
- // duplicate key error because the data will already reflect the transaction.
- assert.commandWorked(sessionColl.insert({_id: 2, a: 1}));
+// When the commitTransaction oplog entry is applied, this operation should fail with a
+// duplicate key error because the data will already reflect the transaction.
+assert.commandWorked(sessionColl.insert({_id: 2, a: 1}));
- // When the commitTransaction oplog entry is applied, this operation should succeed even though
- // the one before it fails. This is used to make sure that initial sync is applying operations
- // from a transaction in a separate storage transaction.
- assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {b: 1}}));
+// When the commitTransaction oplog entry is applied, this operation should succeed even though
+// the one before it fails. This is used to make sure that initial sync is applying operations
+// from a transaction in a separate storage transaction.
+assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {b: 1}}));
- assert.commandWorked(sessionColl.update({_id: 2}, {$unset: {a: 1}}));
- assert.commandWorked(sessionColl.insert({_id: 3, a: 1}));
+assert.commandWorked(sessionColl.update({_id: 2}, {$unset: {a: 1}}));
+assert.commandWorked(sessionColl.insert({_id: 3, a: 1}));
- jsTestLog("Preparing and committing a transaction");
+jsTestLog("Preparing and committing a transaction");
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangBeforeCopyingDatabases", mode: "off"}));
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangBeforeCopyingDatabases", mode: "off"}));
- // Wait for the secondary to complete initial sync.
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
+// Wait for the secondary to complete initial sync.
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
- jsTestLog("Initial sync completed");
+jsTestLog("Initial sync completed");
- // Make sure that the later operations from the transaction succeed even though the first
- // operation will fail during oplog application.
- let res = secondary.getDB(dbName).getCollection(collName).find();
- assert.eq(res.toArray(), [{_id: 1, a: 0}, {_id: 2}, {_id: 3, a: 1}], res);
+// Make sure that the later operations from the transaction succeed even though the first
+// operation will fail during oplog application.
+let res = secondary.getDB(dbName).getCollection(collName).find();
+assert.eq(res.toArray(), [{_id: 1, a: 0}, {_id: 2}, {_id: 3, a: 1}], res);
- replTest.stopSet();
+replTest.stopSet();
})();
\ No newline at end of file
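
The restart-into-initial-sync sequence above is a reusable pattern. A sketch, assuming an existing ReplSetTest `rst`, a secondary `node`, and that jstests/libs/check_log.js is loaded; the failpoint name is the one the test itself uses:

    node = rst.restart(node, {
        startClean: true,  // wipe the data files so the node must initial sync
        setParameter: {
            'failpoint.initialSyncHangBeforeCopyingDatabases': tojson({mode: 'alwaysOn'})
        }
    });
    // Block until the failpoint reports that it has engaged.
    checkLog.contains(node, 'initialSyncHangBeforeCopyingDatabases fail point enabled');

    // ... perform writes that initial sync must both clone and re-apply ...

    assert.commandWorked(node.adminCommand(
        {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
    rst.waitForState(node, ReplSetTest.State.SECONDARY);
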
diff --git a/jstests/replsets/commit_transaction_recovery.js b/jstests/replsets/commit_transaction_recovery.js
index a1a12e8c183..cdd2c49a830 100644
--- a/jstests/replsets/commit_transaction_recovery.js
+++ b/jstests/replsets/commit_transaction_recovery.js
@@ -7,64 +7,64 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- let primary = replTest.getPrimary();
+let primary = replTest.getPrimary();
- const dbName = "test";
- const collName = "commit_transaction_recovery";
- let testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "commit_transaction_recovery";
+let testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- testDB.runCommand({drop: collName});
- assert.commandWorked(testDB.runCommand({create: collName}));
+testDB.runCommand({drop: collName});
+assert.commandWorked(testDB.runCommand({create: collName}));
- let session = primary.startSession({causalConsistency: false});
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+let session = primary.startSession({causalConsistency: false});
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- jsTestLog("Disable snapshotting on all nodes");
- // Disable snapshotting so that future operations do not enter the majority snapshot.
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
+jsTestLog("Disable snapshotting on all nodes");
+// Disable snapshotting so that future operations do not enter the majority snapshot.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
- jsTestLog("Committing the transaction");
- // Since the commitTimestamp is after the last snapshot, this oplog entry will be replayed
- // during replication recovery during restart.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+jsTestLog("Committing the transaction");
+// Since the commitTimestamp is after the last snapshot, this oplog entry will be replayed
+// during replication recovery during restart.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- jsTestLog("Restarting node");
+jsTestLog("Restarting node");
- // Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
- // unset on the node following the restart.
- replTest.restart(primary);
+// Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
+// unset on the node following the restart.
+replTest.restart(primary);
- jsTestLog("Node was restarted");
+jsTestLog("Node was restarted");
- primary = replTest.getPrimary();
- testDB = primary.getDB(dbName);
- session = primary.startSession({causalConsistency: false});
- sessionDB = session.getDatabase(dbName);
- session.startTransaction();
+primary = replTest.getPrimary();
+testDB = primary.getDB(dbName);
+session = primary.startSession({causalConsistency: false});
+sessionDB = session.getDatabase(dbName);
+session.startTransaction();
- // Make sure that we can read the document from the transaction after recovery.
- assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1});
+// Make sure that we can read the document from the transaction after recovery.
+assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1});
- // Make sure that another write on the same document from the transaction has no write conflict.
- // Also, make sure that we can run another transaction after recovery without any problems.
- assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
+// Make sure that another write on the same document from the transaction has no write conflict.
+// Also, make sure that we can run another transaction after recovery without any problems.
+assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
- replTest.stopSet();
+replTest.stopSet();
}());
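
For reference, the prepare/commit flow that this test (and the two before it) exercises reduces to a few helper calls. The sketch below assumes jstests/core/txns/libs/prepare_helpers.js is loaded and `session` is an open session on the primary:

    session.startTransaction();
    assert.commandWorked(session.getDatabase("test").coll.insert({_id: 1}));
    // prepareTransaction() issues the prepare and returns its prepareTimestamp.
    const prepareTs = PrepareHelpers.prepareTransaction(session);
    // Committing at the prepareTimestamp (or later) makes the writes durable.
    assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTs));
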
diff --git a/jstests/replsets/dbcheck.js b/jstests/replsets/dbcheck.js
index 07b268bb0ec..99ab6261b00 100644
--- a/jstests/replsets/dbcheck.js
+++ b/jstests/replsets/dbcheck.js
@@ -5,103 +5,99 @@
*/
(function() {
- "use strict";
+"use strict";
- // TODO(SERVER-31323): Re-enable when existing dbCheck issues are fixed.
- if (true)
- return;
+// TODO(SERVER-31323): Re-enable when existing dbCheck issues are fixed.
+if (true)
+ return;
- let nodeCount = 3;
- let replSet = new ReplSetTest({name: "dbCheckSet", nodes: nodeCount});
+let nodeCount = 3;
+let replSet = new ReplSetTest({name: "dbCheckSet", nodes: nodeCount});
- replSet.startSet();
- replSet.initiate();
- replSet.awaitSecondaryNodes();
+replSet.startSet();
+replSet.initiate();
+replSet.awaitSecondaryNodes();
- function forEachSecondary(f) {
- for (let secondary of replSet.getSecondaries()) {
- f(secondary);
- }
+function forEachSecondary(f) {
+ for (let secondary of replSet.getSecondaries()) {
+ f(secondary);
}
+}
- function forEachNode(f) {
- f(replSet.getPrimary());
- forEachSecondary(f);
- }
+function forEachNode(f) {
+ f(replSet.getPrimary());
+ forEachSecondary(f);
+}
- let dbName = "dbCheck-test";
- let collName = "dbcheck-collection";
+let dbName = "dbCheck-test";
+let collName = "dbcheck-collection";
- // Clear local.system.healthlog.
- function clearLog() {
- forEachNode(conn => conn.getDB("local").system.healthlog.drop());
- }
+// Clear local.system.healthlog.
+function clearLog() {
+ forEachNode(conn => conn.getDB("local").system.healthlog.drop());
+}
- function addEnoughForMultipleBatches(collection) {
- collection.insertMany([...Array(10000).keys()].map(x => ({_id: x})));
- }
+function addEnoughForMultipleBatches(collection) {
+ collection.insertMany([...Array(10000).keys()].map(x => ({_id: x})));
+}
- // Name for a collection which takes multiple batches to check and which shouldn't be modified
- // by any of the tests.
- let multiBatchSimpleCollName = "dbcheck-simple-collection";
- addEnoughForMultipleBatches(replSet.getPrimary().getDB(dbName)[multiBatchSimpleCollName]);
+// Name for a collection which takes multiple batches to check and which shouldn't be modified
+// by any of the tests.
+let multiBatchSimpleCollName = "dbcheck-simple-collection";
+addEnoughForMultipleBatches(replSet.getPrimary().getDB(dbName)[multiBatchSimpleCollName]);
- function dbCheckCompleted(db) {
- return db.currentOp().inprog.filter(x => x["desc"] == "dbCheck")[0] === undefined;
- }
+function dbCheckCompleted(db) {
+ return db.currentOp().inprog.filter(x => x["desc"] == "dbCheck")[0] === undefined;
+}
- // Wait for dbCheck to complete (on both primaries and secondaries). Fails an assertion if
- // dbCheck takes longer than maxMs.
- function awaitDbCheckCompletion(db) {
- let start = Date.now();
+// Wait for dbCheck to complete (on the primary and all secondaries). Fails an assertion if
+// dbCheck times out.
+function awaitDbCheckCompletion(db) {
+ let start = Date.now();
- assert.soon(() => dbCheckCompleted(db), "dbCheck timed out");
- replSet.awaitSecondaryNodes();
- replSet.awaitReplication();
+ assert.soon(() => dbCheckCompleted(db), "dbCheck timed out");
+ replSet.awaitSecondaryNodes();
+ replSet.awaitReplication();
- // Give the health log buffers some time to flush.
- sleep(100);
- }
+ // Give the health log buffers some time to flush.
+ sleep(100);
+}
- // Check that everything in the health log shows a successful and complete check with no found
- // inconsistencies.
- function checkLogAllConsistent(conn) {
- let healthlog = conn.getDB("local").system.healthlog;
+// Check that everything in the health log shows a successful and complete check with no
+// inconsistencies found.
+function checkLogAllConsistent(conn) {
+ let healthlog = conn.getDB("local").system.healthlog;
- assert(healthlog.find().count(), "dbCheck put no batches in health log");
+ assert(healthlog.find().count(), "dbCheck put no batches in health log");
- let maxResult = healthlog.aggregate([
- {$match: {operation: "dbCheckBatch"}},
- {$group: {_id: 1, key: {$max: "$data.maxKey"}}}
- ]);
+ let maxResult = healthlog.aggregate(
+ [{$match: {operation: "dbCheckBatch"}}, {$group: {_id: 1, key: {$max: "$data.maxKey"}}}]);
- assert(maxResult.hasNext(), "dbCheck put no batches in health log");
- assert.eq(maxResult.next().key, {"$maxKey": 1}, "dbCheck batches should end at MaxKey");
+ assert(maxResult.hasNext(), "dbCheck put no batches in health log");
+ assert.eq(maxResult.next().key, {"$maxKey": 1}, "dbCheck batches should end at MaxKey");
- let minResult = healthlog.aggregate([
- {$match: {operation: "dbCheckBatch"}},
- {$group: {_id: 1, key: {$min: "$data.minKey"}}}
- ]);
+ let minResult = healthlog.aggregate(
+ [{$match: {operation: "dbCheckBatch"}}, {$group: {_id: 1, key: {$min: "$data.minKey"}}}]);
- assert(minResult.hasNext(), "dbCheck put no batches in health log");
- assert.eq(minResult.next().key, {"$minKey": 1}, "dbCheck batches should start at MinKey");
+ assert(minResult.hasNext(), "dbCheck put no batches in health log");
+ assert.eq(minResult.next().key, {"$minKey": 1}, "dbCheck batches should start at MinKey");
- // Assert no errors (i.e., found inconsistencies).
- let errs = healthlog.find({"severity": {"$ne": "info"}});
- if (errs.hasNext()) {
- assert(false, "dbCheck found inconsistency: " + tojson(errs.next()));
- }
+ // Assert no errors (i.e., found inconsistencies).
+ let errs = healthlog.find({"severity": {"$ne": "info"}});
+ if (errs.hasNext()) {
+ assert(false, "dbCheck found inconsistency: " + tojson(errs.next()));
+ }
- // Assert no failures (i.e., checks that failed to complete).
- let failedChecks = healthlog.find({"operation": "dbCheckBatch", "data.success": false});
- if (failedChecks.hasNext()) {
- assert(false, "dbCheck batch failed: " + tojson(failedChecks.next()));
- }
+ // Assert no failures (i.e., checks that failed to complete).
+ let failedChecks = healthlog.find({"operation": "dbCheckBatch", "data.success": false});
+ if (failedChecks.hasNext()) {
+ assert(false, "dbCheck batch failed: " + tojson(failedChecks.next()));
+ }
- // Finds an entry with data.minKey === MinKey, and then matches its maxKey against
- // another document's minKey, and so on, and then checks that the result of that search
- // has data.maxKey === MaxKey.
- let completeCoverage = healthlog.aggregate([
+ // Finds an entry with data.minKey === MinKey, and then matches its maxKey against
+ // another document's minKey, and so on, and then checks that the result of that search
+ // has data.maxKey === MaxKey.
+ let completeCoverage = healthlog.aggregate([
{$match: {"operation": "dbCheckBatch", "data.minKey": MinKey}},
{
$graphLookup: {
@@ -116,335 +112,330 @@
{$match: {"batchLimits.data.maxKey": MaxKey}}
]);
- assert(completeCoverage.hasNext(), "dbCheck batches do not cover full key range");
- }
+ assert(completeCoverage.hasNext(), "dbCheck batches do not cover full key range");
+}
+
+// Check that the total of all batches in the health log on `conn` is equal to the total number
+// of documents and bytes in `coll`.
+
+// Returns a document with fields "totalDocs" and "totalBytes", representing the total size of
+// the batches in the health log.
+function healthLogCounts(healthlog) {
+ let result = healthlog.aggregate([
+ {$match: {"operation": "dbCheckBatch"}},
+ {
+ $group: {
+ "_id": null,
+ "totalDocs": {$sum: "$data.count"},
+ "totalBytes": {$sum: "$data.bytes"}
+ }
+ }
+ ]);
- // Check that the total of all batches in the health log on `conn` is equal to the total number
- // of documents and bytes in `coll`.
+ assert(result.hasNext(), "dbCheck put no batches in health log");
- // Returns a document with fields "totalDocs" and "totalBytes", representing the total size of
- // the batches in the health log.
- function healthLogCounts(healthlog) {
- let result = healthlog.aggregate([
- {$match: {"operation": "dbCheckBatch"}},
- {
- $group: {
- "_id": null,
- "totalDocs": {$sum: "$data.count"},
- "totalBytes": {$sum: "$data.bytes"}
- }
- }
- ]);
+ return result.next();
+}
- assert(result.hasNext(), "dbCheck put no batches in health log");
+function checkTotalCounts(conn, coll) {
+ let result = healthLogCounts(conn.getDB("local").system.healthlog);
- return result.next();
- }
+ assert.eq(result.totalDocs, coll.count(), "dbCheck batches do not count all documents");
- function checkTotalCounts(conn, coll) {
- let result = healthLogCounts(conn.getDB("local").system.healthlog);
+ // Calculate the size on the client side, because collection.dataSize is not necessarily the
+ // sum of the document sizes.
+ let size = coll.find().toArray().reduce((x, y) => x + bsonsize(y), 0);
- assert.eq(result.totalDocs, coll.count(), "dbCheck batches do not count all documents");
+ assert.eq(result.totalBytes, size, "dbCheck batches do not count all bytes");
+}
- // Calculate the size on the client side, because collection.dataSize is not necessarily the
- // sum of the document sizes.
- let size = coll.find().toArray().reduce((x, y) => x + bsonsize(y), 0);
+// First check behavior when everything is consistent.
+function simpleTestConsistent() {
+ let master = replSet.getPrimary();
+ clearLog();
- assert.eq(result.totalBytes, size, "dbCheck batches do not count all bytes");
- }
+ assert.neq(master, undefined);
+ let db = master.getDB(dbName);
+ assert.commandWorked(db.runCommand({"dbCheck": multiBatchSimpleCollName}));
- // First check behavior when everything is consistent.
- function simpleTestConsistent() {
- let master = replSet.getPrimary();
- clearLog();
+ awaitDbCheckCompletion(db);
- assert.neq(master, undefined);
- let db = master.getDB(dbName);
- assert.commandWorked(db.runCommand({"dbCheck": multiBatchSimpleCollName}));
+ checkLogAllConsistent(master);
+ checkTotalCounts(master, db[multiBatchSimpleCollName]);
- awaitDbCheckCompletion(db);
+ forEachSecondary(function(secondary) {
+ checkLogAllConsistent(secondary);
+ checkTotalCounts(secondary, secondary.getDB(dbName)[multiBatchSimpleCollName]);
+ });
+}
- checkLogAllConsistent(master);
- checkTotalCounts(master, db[multiBatchSimpleCollName]);
+// Same thing, but now with concurrent updates.
+function concurrentTestConsistent() {
+ let master = replSet.getPrimary();
- forEachSecondary(function(secondary) {
- checkLogAllConsistent(secondary);
- checkTotalCounts(secondary, secondary.getDB(dbName)[multiBatchSimpleCollName]);
- });
+ let db = master.getDB(dbName);
+
+ // Add enough documents that dbCheck will take a few seconds.
+ db[collName].insertMany([...Array(10000).keys()].map(x => ({i: x})));
+
+ assert.commandWorked(db.runCommand({"dbCheck": collName}));
+
+ let coll = db[collName];
+
+ while (db.currentOp().inprog.filter(x => x["desc"] === "dbCheck").length) {
+ coll.updateOne({}, {"$inc": {"i": 10}});
+ coll.insertOne({"i": 42});
+ coll.deleteOne({});
}
- // Same thing, but now with concurrent updates.
- function concurrentTestConsistent() {
- let master = replSet.getPrimary();
+ awaitDbCheckCompletion(db);
- let db = master.getDB(dbName);
+ checkLogAllConsistent(master);
+ // Omit check for total counts, which might have changed with concurrent updates.
- // Add enough documents that dbCheck will take a few seconds.
- db[collName].insertMany([...Array(10000).keys()].map(x => ({i: x})));
+ forEachSecondary(secondary => checkLogAllConsistent(secondary, true));
+}
- assert.commandWorked(db.runCommand({"dbCheck": collName}));
+simpleTestConsistent();
+concurrentTestConsistent();
- let coll = db[collName];
+// Test the various other parameters.
+function testDbCheckParameters() {
+ let master = replSet.getPrimary();
+ let db = master.getDB(dbName);
- while (db.currentOp().inprog.filter(x => x["desc"] === "dbCheck").length) {
- coll.updateOne({}, {"$inc": {"i": 10}});
- coll.insertOne({"i": 42});
- coll.deleteOne({});
- }
+ // Clean up for the test.
+ clearLog();
+
+ let docSize = bsonsize({_id: 10});
- awaitDbCheckCompletion(db);
+ function checkEntryBounds(start, end) {
+ forEachNode(function(node) {
+ let healthlog = node.getDB("local").system.healthlog;
+ let keyBoundsResult = healthlog.aggregate([
+ {$match: {operation: "dbCheckBatch"}},
+ {
+ $group:
+ {_id: null, minKey: {$min: "$data.minKey"}, maxKey: {$max: "$data.maxKey"}}
+ }
+ ]);
- checkLogAllConsistent(master);
- // Omit check for total counts, which might have changed with concurrent updates.
+ assert(keyBoundsResult.hasNext(), "dbCheck put no batches in health log");
- forEachSecondary(secondary => checkLogAllConsistent(secondary, true));
+ let bounds = keyBoundsResult.next();
+ assert.eq(bounds.minKey, start, "dbCheck minKey field incorrect");
+ assert.eq(bounds.maxKey, end, "dbCheck maxKey field incorrect");
+
+ let counts = healthLogCounts(healthlog);
+ assert.eq(counts.totalDocs, end - start);
+ assert.eq(counts.totalBytes, (end - start) * docSize);
+ });
}
- simpleTestConsistent();
- concurrentTestConsistent();
-
- // Test the various other parameters.
- function testDbCheckParameters() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
-
- // Clean up for the test.
- clearLog();
-
- let docSize = bsonsize({_id: 10});
-
- function checkEntryBounds(start, end) {
- forEachNode(function(node) {
- let healthlog = node.getDB("local").system.healthlog;
- let keyBoundsResult = healthlog.aggregate([
- {$match: {operation: "dbCheckBatch"}},
- {
- $group: {
- _id: null,
- minKey: {$min: "$data.minKey"},
- maxKey: {$max: "$data.maxKey"}
- }
- }
- ]);
-
- assert(keyBoundsResult.hasNext(), "dbCheck put no batches in health log");
-
- let bounds = keyBoundsResult.next();
- assert.eq(bounds.minKey, start, "dbCheck minKey field incorrect");
- assert.eq(bounds.maxKey, end, "dbCheck maxKey field incorrect");
-
- let counts = healthLogCounts(healthlog);
- assert.eq(counts.totalDocs, end - start);
- assert.eq(counts.totalBytes, (end - start) * docSize);
- });
- }
+ // Run a dbCheck on just a subset of the documents
+ let start = 1000;
+ let end = 9000;
- // Run a dbCheck on just a subset of the documents
- let start = 1000;
- let end = 9000;
+ assert.commandWorked(
+ db.runCommand({dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end}));
- assert.commandWorked(
- db.runCommand({dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end}));
+ awaitDbCheckCompletion(db);
- awaitDbCheckCompletion(db);
+ checkEntryBounds(start, end);
- checkEntryBounds(start, end);
+ // Now, clear the health logs again,
+ clearLog();
- // Now, clear the health logs again,
- clearLog();
+ let maxCount = 5000;
- let maxCount = 5000;
+ // and do the same with a count constraint.
+ assert.commandWorked(db.runCommand(
+ {dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end, maxCount: maxCount}));
- // and do the same with a count constraint.
- assert.commandWorked(db.runCommand(
- {dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end, maxCount: maxCount}));
+ // We expect it to reach the count limit before reaching maxKey.
+ awaitDbCheckCompletion(db);
+ checkEntryBounds(start, start + maxCount);
- // We expect it to reach the count limit before reaching maxKey.
- awaitDbCheckCompletion(db);
- checkEntryBounds(start, start + maxCount);
+ // Finally, do the same with a size constraint.
+ clearLog();
+ let maxSize = maxCount * docSize;
+ assert.commandWorked(db.runCommand(
+ {dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end, maxSize: maxSize}));
+ awaitDbCheckCompletion(db);
+ checkEntryBounds(start, start + maxCount);
+}
- // Finally, do the same with a size constraint.
- clearLog();
- let maxSize = maxCount * docSize;
- assert.commandWorked(db.runCommand(
- {dbCheck: multiBatchSimpleCollName, minKey: start, maxKey: end, maxSize: maxSize}));
- awaitDbCheckCompletion(db);
- checkEntryBounds(start, start + maxCount);
- }
+testDbCheckParameters();
- testDbCheckParameters();
-
- // Now, test some unusual cases where the command should fail.
- function testErrorOnNonexistent() {
- let master = replSet.getPrimary();
- let db = master.getDB("this-probably-doesnt-exist");
- assert.commandFailed(db.runCommand({dbCheck: 1}),
- "dbCheck spuriously succeeded on nonexistent database");
- db = master.getDB(dbName);
- assert.commandFailed(db.runCommand({dbCheck: "this-also-probably-doesnt-exist"}),
- "dbCheck spuriously succeeded on nonexistent collection");
- }
+// Now, test some unusual cases where the command should fail.
+function testErrorOnNonexistent() {
+ let master = replSet.getPrimary();
+ let db = master.getDB("this-probably-doesnt-exist");
+ assert.commandFailed(db.runCommand({dbCheck: 1}),
+ "dbCheck spuriously succeeded on nonexistent database");
+ db = master.getDB(dbName);
+ assert.commandFailed(db.runCommand({dbCheck: "this-also-probably-doesnt-exist"}),
+ "dbCheck spuriously succeeded on nonexistent collection");
+}
- function testErrorOnSecondary() {
- let secondary = replSet.getSecondary();
- let db = secondary.getDB(dbName);
- assert.commandFailed(db.runCommand({dbCheck: collName}));
- }
+function testErrorOnSecondary() {
+ let secondary = replSet.getSecondary();
+ let db = secondary.getDB(dbName);
+ assert.commandFailed(db.runCommand({dbCheck: collName}));
+}
- function testErrorOnUnreplicated() {
- let master = replSet.getPrimary();
- let db = master.getDB("local");
+function testErrorOnUnreplicated() {
+ let master = replSet.getPrimary();
+ let db = master.getDB("local");
- assert.commandFailed(db.runCommand({dbCheck: "oplog.rs"}),
- "dbCheck spuriously succeeded on oplog");
- assert.commandFailed(master.getDB(dbName).runCommand({dbCheck: "system.profile"}),
- "dbCheck spuriously succeeded on system.profile");
- }
+ assert.commandFailed(db.runCommand({dbCheck: "oplog.rs"}),
+ "dbCheck spuriously succeeded on oplog");
+ assert.commandFailed(master.getDB(dbName).runCommand({dbCheck: "system.profile"}),
+ "dbCheck spuriously succeeded on system.profile");
+}
- testErrorOnNonexistent();
- testErrorOnSecondary();
- testErrorOnUnreplicated();
-
- // Test stepdown.
- function testSucceedsOnStepdown() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
-
- let nodeId = replSet.getNodeId(master);
- assert.commandWorked(db.runCommand({dbCheck: multiBatchSimpleCollName}));
-
- // Step down the master.
- assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown: 0, force: true}));
-
- // Wait for the cluster to come up.
- replSet.awaitSecondaryNodes();
-
- // Find the node we ran dbCheck on.
- db = replSet.getSecondaries()
- .filter(function isPreviousMaster(node) {
- return replSet.getNodeId(node) === nodeId;
- })[0]
- .getDB(dbName);
-
- // Check that it's still responding.
- try {
- assert.commandWorked(db.runCommand({ping: 1}),
- "ping failed after stepdown during dbCheck");
- } catch (e) {
- doassert("cannot connect after dbCheck with stepdown");
- }
+testErrorOnNonexistent();
+testErrorOnSecondary();
+testErrorOnUnreplicated();
- // And that our dbCheck completed.
- assert(dbCheckCompleted(db), "dbCheck failed to terminate on stepdown");
- }
+// Test stepdown.
+function testSucceedsOnStepdown() {
+ let master = replSet.getPrimary();
+ let db = master.getDB(dbName);
- testSucceedsOnStepdown();
+ let nodeId = replSet.getNodeId(master);
+ assert.commandWorked(db.runCommand({dbCheck: multiBatchSimpleCollName}));
- function collectionUuid(db, collName) {
- return db.getCollectionInfos().filter(coll => coll.name === collName)[0].info.uuid;
+ // Step down the master.
+ assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown: 0, force: true}));
+
+ // Wait for the cluster to come up.
+ replSet.awaitSecondaryNodes();
+
+ // Find the node we ran dbCheck on.
+ db = replSet.getSecondaries()
+ .filter(function isPreviousMaster(node) {
+ return replSet.getNodeId(node) === nodeId;
+ })[0]
+ .getDB(dbName);
+
+ // Check that it's still responding.
+ try {
+ assert.commandWorked(db.runCommand({ping: 1}), "ping failed after stepdown during dbCheck");
+ } catch (e) {
+ doassert("cannot connect after dbCheck with stepdown");
}
- function getDummyOplogEntry() {
- let master = replSet.getPrimary();
- let coll = master.getDB(dbName)[collName];
+ // And that our dbCheck completed.
+ assert(dbCheckCompleted(db), "dbCheck failed to terminate on stepdown");
+}
- let replSetStatus =
- assert.commandWorked(master.getDB("admin").runCommand({replSetGetStatus: 1}));
- let connStatus = replSetStatus.members.filter(m => m.self)[0];
- let lastOpTime = connStatus.optime;
+testSucceedsOnStepdown();
- let entry = master.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
- entry["ui"] = collectionUuid(master.getDB(dbName), collName);
- entry["ns"] = coll.stats().ns;
- entry["ts"] = new Timestamp();
+function collectionUuid(db, collName) {
+ return db.getCollectionInfos().filter(coll => coll.name === collName)[0].info.uuid;
+}
- return entry;
- }
+function getDummyOplogEntry() {
+ let master = replSet.getPrimary();
+ let coll = master.getDB(dbName)[collName];
- // Create various inconsistencies, and check that dbCheck spots them.
- function insertOnSecondaries(doc) {
- let master = replSet.getPrimary();
- let entry = getDummyOplogEntry();
- entry["op"] = "i";
- entry["o"] = doc;
+ let replSetStatus =
+ assert.commandWorked(master.getDB("admin").runCommand({replSetGetStatus: 1}));
+ let connStatus = replSetStatus.members.filter(m => m.self)[0];
+ let lastOpTime = connStatus.optime;
- master.getDB("local").oplog.rs.insertOne(entry);
- }
+ let entry = master.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
+ entry["ui"] = collectionUuid(master.getDB(dbName), collName);
+ entry["ns"] = coll.stats().ns;
+ entry["ts"] = new Timestamp();
- // Run an apply-ops-ish command on a secondary.
- function runCommandOnSecondaries(doc, ns) {
- let master = replSet.getPrimary();
- let entry = getDummyOplogEntry();
- entry["op"] = "c";
- entry["o"] = doc;
+ return entry;
+}
- if (ns !== undefined) {
- entry["ns"] = ns;
- }
+// Create various inconsistencies, and check that dbCheck spots them.
+function insertOnSecondaries(doc) {
+ let master = replSet.getPrimary();
+ let entry = getDummyOplogEntry();
+ entry["op"] = "i";
+ entry["o"] = doc;
- master.getDB("local").oplog.rs.insertOne(entry);
- }
+ master.getDB("local").oplog.rs.insertOne(entry);
+}
- // And on a primary.
- function runCommandOnPrimary(doc) {
- let master = replSet.getPrimary();
- let entry = getDummyOplogEntry();
- entry["op"] = "c";
- entry["o"] = doc;
+// Run an apply-ops-ish command on a secondary.
+function runCommandOnSecondaries(doc, ns) {
+ let master = replSet.getPrimary();
+ let entry = getDummyOplogEntry();
+ entry["op"] = "c";
+ entry["o"] = doc;
- master.getDB("admin").runCommand({applyOps: [entry]});
+ if (ns !== undefined) {
+ entry["ns"] = ns;
}
- // Just add an extra document, and test that it catches it.
- function simpleTestCatchesExtra() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
+ master.getDB("local").oplog.rs.insertOne(entry);
+}
- clearLog();
+// And on a primary.
+function runCommandOnPrimary(doc) {
+ let master = replSet.getPrimary();
+ let entry = getDummyOplogEntry();
+ entry["op"] = "c";
+ entry["o"] = doc;
- insertOnSecondaries({_id: 12390290});
+ master.getDB("admin").runCommand({applyOps: [entry]});
+}
- assert.commandWorked(db.runCommand({dbCheck: collName}));
- awaitDbCheckCompletion(db);
+// Just add an extra document, and test that it catches it.
+function simpleTestCatchesExtra() {
+ let master = replSet.getPrimary();
+ let db = master.getDB(dbName);
- let nErrors = replSet.getSecondary()
- .getDB("local")
- .system.healthlog.find({operation: /dbCheck.*/, severity: "error"})
- .count();
+ clearLog();
- assert.neq(nErrors, 0, "dbCheck found no errors after insertion on secondaries");
- assert.eq(nErrors, 1, "dbCheck found too many errors after single inconsistent insertion");
- }
+ insertOnSecondaries({_id: 12390290});
- // Test that dbCheck catches changing various pieces of collection metadata.
- function testCollectionMetadataChanges() {
- let master = replSet.getPrimary();
- let db = master.getDB(dbName);
- db[collName].drop();
- clearLog();
+ assert.commandWorked(db.runCommand({dbCheck: collName}));
+ awaitDbCheckCompletion(db);
- // Create the collection on the primary.
- db.createCollection(collName, {validationLevel: "off"});
+ let nErrors = replSet.getSecondary()
+ .getDB("local")
+ .system.healthlog.find({operation: /dbCheck.*/, severity: "error"})
+ .count();
- // Add an index on the secondaries.
- runCommandOnSecondaries({createIndexes: collName, v: 2, key: {"foo": 1}, name: "foo_1"},
- dbName + ".$cmd");
+ assert.neq(nErrors, 0, "dbCheck found no errors after insertion on secondaries");
+ assert.eq(nErrors, 1, "dbCheck found too many errors after single inconsistent insertion");
+}
- assert.commandWorked(db.runCommand({dbCheck: collName}));
- awaitDbCheckCompletion(db);
+// Test that dbCheck catches changing various pieces of collection metadata.
+function testCollectionMetadataChanges() {
+ let master = replSet.getPrimary();
+ let db = master.getDB(dbName);
+ db[collName].drop();
+ clearLog();
- let nErrors =
- replSet.getSecondary()
- .getDB("local")
- .system.healthlog
- .find({"operation": /dbCheck.*/, "severity": "error", "data.success": true})
- .count();
+ // Create the collection on the primary.
+ db.createCollection(collName, {validationLevel: "off"});
- assert.eq(nErrors, 1, "dbCheck found wrong number of errors after inconsistent `create`");
+ // Add an index on the secondaries.
+ runCommandOnSecondaries({createIndexes: collName, v: 2, key: {"foo": 1}, name: "foo_1"},
+ dbName + ".$cmd");
- clearLog();
- }
+ assert.commandWorked(db.runCommand({dbCheck: collName}));
+ awaitDbCheckCompletion(db);
+
+ let nErrors = replSet.getSecondary()
+ .getDB("local")
+ .system.healthlog
+ .find({"operation": /dbCheck.*/, "severity": "error", "data.success": true})
+ .count();
+
+ assert.eq(nErrors, 1, "dbCheck found wrong number of errors after inconsistent `create`");
+
+ clearLog();
+}
- simpleTestCatchesExtra();
- testCollectionMetadataChanges();
+simpleTestCatchesExtra();
+testCollectionMetadataChanges();
})();
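
Most of the helpers above reduce to querying local.system.healthlog, where dbCheck records one "dbCheckBatch" entry per batch. A small sketch of the error scan, assuming `conn` is a connection to any replica set member:

    // Anything with a severity other than "info" indicates an inconsistency
    // that dbCheck detected on this node.
    function healthLogErrors(conn) {
        return conn.getDB("local")
            .system.healthlog.find({operation: /dbCheck.*/, severity: {$ne: "info"}})
            .toArray();
    }
    // Example use: assert.eq(healthLogErrors(conn).length, 0, "found inconsistencies");
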
diff --git a/jstests/replsets/dbhash_lock_acquisition.js b/jstests/replsets/dbhash_lock_acquisition.js
index 3c66ad1aa48..8fd30e7f7be 100644
--- a/jstests/replsets/dbhash_lock_acquisition.js
+++ b/jstests/replsets/dbhash_lock_acquisition.js
@@ -5,92 +5,91 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/libs/parallelTester.js"); // for ScopedThread
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
-
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(db.getName());
+"use strict";
+
+load("jstests/libs/parallelTester.js"); // for ScopedThread
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
+
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(db.getName());
+
+// We insert a document so the dbHash command has a collection to process.
+assert.commandWorked(sessionDB.mycoll.insert({}, {writeConcern: {w: "majority"}}));
+const clusterTime = session.getOperationTime();
+
+// We then start a transaction in order to be able to have a catalog operation queue up behind it.
+session.startTransaction();
+assert.commandWorked(sessionDB.mycoll.insert({}));
+
+const ops = db.currentOp({"lsid.id": session.getSessionId().id}).inprog;
+assert.eq(
+ 1, ops.length, () => "Failed to find session in currentOp() output: " + tojson(db.currentOp()));
+assert.eq(ops[0].locks,
+ {ReplicationStateTransition: "w", Global: "w", Database: "w", Collection: "w"});
+
+const threadCaptruncCmd = new ScopedThread(function(host) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
+
+ // We use the captrunc command as a catalog operation that requires a MODE_X lock on the
+ // collection. This ensures we aren't having the dbHash command queue up behind it on a
+ // database-level lock. The collection isn't capped so it'll fail with an
+ // IllegalOperation error response.
+ assert.commandFailedWithCode(db.runCommand({captrunc: "mycoll", n: 1}),
+ ErrorCodes.IllegalOperation);
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+}, db.getMongo().host);
+
+threadCaptruncCmd.start();
+
+assert.soon(() => {
+ const ops = db.currentOp({"command.captrunc": "mycoll", waitingForLock: true}).inprog;
+ return ops.length === 1;
+}, () => "Failed to find create collection in currentOp() output: " + tojson(db.currentOp()));
+
+const threadDBHash = new ScopedThread(function(host, clusterTime) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
+ assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: eval(clusterTime),
+ }));
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+}, db.getMongo().host, tojson(clusterTime));
+
+threadDBHash.start();
+
+assert.soon(() => {
+ const ops = db.currentOp({"command.dbHash": 1, waitingForLock: true}).inprog;
+ if (ops.length === 0) {
+ return false;
+ }
+ assert.eq(ops[0].locks,
+ {ReplicationStateTransition: "w", Global: "r", Database: "r", Collection: "r"});
+ return true;
+}, () => "Failed to find create collection in currentOp() output: " + tojson(db.currentOp()));
- // We insert a document so the dbHash command has a collection to process.
- assert.commandWorked(sessionDB.mycoll.insert({}, {writeConcern: {w: "majority"}}));
- const clusterTime = session.getOperationTime();
+assert.commandWorked(session.commitTransaction_forTesting());
+threadCaptruncCmd.join();
+threadDBHash.join();
- // We then start a transaction in order to be able have a catalog operation queue up behind it.
- session.startTransaction();
- assert.commandWorked(sessionDB.mycoll.insert({}));
+assert.commandWorked(threadCaptruncCmd.returnData());
+assert.commandWorked(threadDBHash.returnData());
- const ops = db.currentOp({"lsid.id": session.getSessionId().id}).inprog;
- assert.eq(1,
- ops.length,
- () => "Failed to find session in currentOp() output: " + tojson(db.currentOp()));
- assert.eq(ops[0].locks,
- {ReplicationStateTransition: "w", Global: "w", Database: "w", Collection: "w"});
-
- const threadCaptruncCmd = new ScopedThread(function(host) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
-
- // We use the captrunc command as a catalog operation that requires a MODE_X lock on the
- // collection. This ensures we aren't having the dbHash command queue up behind it on a
- // database-level lock. The collection isn't capped so it'll fail with an
- // IllegalOperation error response.
- assert.commandFailedWithCode(db.runCommand({captrunc: "mycoll", n: 1}),
- ErrorCodes.IllegalOperation);
- return {ok: 1};
- } catch (e) {
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
- }, db.getMongo().host);
-
- threadCaptruncCmd.start();
-
- assert.soon(() => {
- const ops = db.currentOp({"command.captrunc": "mycoll", waitingForLock: true}).inprog;
- return ops.length === 1;
- }, () => "Failed to find create collection in currentOp() output: " + tojson(db.currentOp()));
-
- const threadDBHash = new ScopedThread(function(host, clusterTime) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
- assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: eval(clusterTime),
- }));
- return {ok: 1};
- } catch (e) {
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
- }, db.getMongo().host, tojson(clusterTime));
-
- threadDBHash.start();
-
- assert.soon(() => {
- const ops = db.currentOp({"command.dbHash": 1, waitingForLock: true}).inprog;
- if (ops.length === 0) {
- return false;
- }
- assert.eq(ops[0].locks,
- {ReplicationStateTransition: "w", Global: "r", Database: "r", Collection: "r"});
- return true;
- }, () => "Failed to find create collection in currentOp() output: " + tojson(db.currentOp()));
-
- assert.commandWorked(session.commitTransaction_forTesting());
- threadCaptruncCmd.join();
- threadDBHash.join();
-
- assert.commandWorked(threadCaptruncCmd.returnData());
- assert.commandWorked(threadDBHash.returnData());
-
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
})();
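
The wait loops above follow a single shell idiom: poll currentOp() until the blocked command appears with waitingForLock: true, and only then unblock it. A minimal sketch of that idiom as a reusable helper, assuming the usual shell test environment (the helper name and timeout are illustrative, not part of the test):

    // Poll currentOp() until exactly one operation matching 'filter' is queued
    // behind a lock, then return that operation's currentOp() entry.
    function awaitOpWaitingForLock(db, filter, timeoutMS = 60 * 1000) {
        let found;
        assert.soon(() => {
            const ops = db.currentOp(Object.assign({waitingForLock: true}, filter)).inprog;
            if (ops.length !== 1) {
                return false;
            }
            found = ops[0];
            return true;
        }, () => "Failed to find op matching " + tojson(filter) + ": " + tojson(db.currentOp()), timeoutMS);
        return found;
    }

    // Usage, mirroring the waits above:
    // awaitOpWaitingForLock(db, {"command.captrunc": "mycoll"});
    // awaitOpWaitingForLock(db, {"command.dbHash": 1});
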
diff --git a/jstests/replsets/dbhash_read_at_cluster_time.js b/jstests/replsets/dbhash_read_at_cluster_time.js
index bf7995aade6..e8c42b4e57f 100644
--- a/jstests/replsets/dbhash_read_at_cluster_time.js
+++ b/jstests/replsets/dbhash_read_at_cluster_time.js
@@ -4,117 +4,123 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- const replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- rst.initiate(replSetConfig);
+const replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+rst.initiate(replSetConfig);
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
- const session = primary.startSession({causalConsistency: false});
- const db = session.getDatabase("test");
- let txnNumber = 0;
+const session = primary.startSession({causalConsistency: false});
+const db = session.getDatabase("test");
+let txnNumber = 0;
- // We prevent the replica set from advancing oldest_timestamp. This ensures that the snapshot
- // associated with 'clusterTime' is retained for the duration of this test.
- rst.nodes.forEach(conn => {
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn",
- }));
- });
-
- // We insert a document and save the md5sum associated with the opTime of that write.
- assert.commandWorked(db.mycoll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- const clusterTime = db.getSession().getOperationTime();
-
- let res = assert.commandWorked(db.runCommand({
+// We prevent the replica set from advancing oldest_timestamp. This ensures that the snapshot
+// associated with 'clusterTime' is retained for the duration of this test.
+rst.nodes.forEach(conn => {
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
+ mode: "alwaysOn",
+ }));
+});
+
+// We insert a document and save the md5sum associated with the opTime of that write.
+assert.commandWorked(db.mycoll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+const clusterTime = db.getSession().getOperationTime();
+
+let res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: clusterTime,
+}));
+
+const hash1 = {
+ collections: res.collections,
+ md5: res.md5
+};
+
+// We insert another document to ensure the collection's contents have a different md5sum now.
+// We use a w=majority write concern to ensure that the insert has also been applied on the
+// secondary by the time we go to run the dbHash command later. This avoids a race where the
+// replication subsystem could be applying the insert operation when the dbHash command is run
+// on the secondary.
+assert.commandWorked(db.mycoll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+
+// However, using $_internalReadAtClusterTime to read at the opTime of the first insert should
+// return the same md5sum as it did originally.
+res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: clusterTime,
+}));
+
+const hash2 = {
+ collections: res.collections,
+ md5: res.md5
+};
+assert.eq(hash1, hash2, "primary returned different dbhash after second insert");
+
+{
+ const secondarySession = secondary.startSession({causalConsistency: false});
+ const secondaryDB = secondarySession.getDatabase("test");
+
+ // Using $_internalReadAtClusterTime to read at the opTime of the first insert should return
+ // the same md5sum on the secondary as it did on the primary.
+ res = assert.commandWorked(secondaryDB.runCommand({
dbHash: 1,
$_internalReadAtClusterTime: clusterTime,
}));
- const hash1 = {collections: res.collections, md5: res.md5};
+ const secondaryHash = {collections: res.collections, md5: res.md5};
+ assert.eq(hash1, secondaryHash, "primary and secondary have different dbhash");
+}
- // We insert another document to ensure the collection's contents have a different md5sum now.
- // We use a w=majority write concern to ensure that the insert has also been applied on the
- // secondary by the time we go to run the dbHash command later. This avoids a race where the
- // replication subsystem could be applying the insert operation when the dbHash command is run
- // on the secondary.
- assert.commandWorked(db.mycoll.insert({_id: 2}, {writeConcern: {w: "majority"}}));
+{
+ const otherSession = primary.startSession({causalConsistency: false});
+ const otherDB = otherSession.getDatabase("test");
- // However, using $_internalReadAtClusterTime to read at the opTime of the first insert should
- // return the same md5sum as it did originally.
+ // We perform another insert inside a separate transaction to cause a MODE_IX lock to be
+ // held on the collection.
+ otherSession.startTransaction();
+ assert.commandWorked(otherDB.mycoll.insert({_id: 3}));
+
+ // It should be possible to run the "dbHash" command with "$_internalReadAtClusterTime"
+ // concurrently.
res = assert.commandWorked(db.runCommand({
dbHash: 1,
$_internalReadAtClusterTime: clusterTime,
}));
- const hash2 = {collections: res.collections, md5: res.md5};
- assert.eq(hash1, hash2, "primary returned different dbhash after second insert");
-
- {
- const secondarySession = secondary.startSession({causalConsistency: false});
- const secondaryDB = secondarySession.getDatabase("test");
-
- // Using $_internalReadAtClusterTime to read at the opTime of the first insert should return
- // the same md5sum on the secondary as it did on the primary.
- res = assert.commandWorked(secondaryDB.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: clusterTime,
- }));
-
- const secondaryHash = {collections: res.collections, md5: res.md5};
- assert.eq(hash1, secondaryHash, "primary and secondary have different dbhash");
- }
-
- {
- const otherSession = primary.startSession({causalConsistency: false});
- const otherDB = otherSession.getDatabase("test");
-
- // We perform another insert inside a separate transaction to cause a MODE_IX lock to be
- // held on the collection.
- otherSession.startTransaction();
- assert.commandWorked(otherDB.mycoll.insert({_id: 3}));
-
- // It should be possible to run the "dbHash" command with "$_internalReadAtClusterTime"
- // concurrently.
- res = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: clusterTime,
- }));
-
- const hash3 = {collections: res.collections, md5: res.md5};
- assert.eq(hash1, hash3, "primary returned different dbhash after third insert");
-
- // However, the "dbHash" command should block behind the transaction if
- // "$_internalReadAtClusterTime" wasn't specified.
- res = assert.commandFailedWithCode(db.runCommand({dbHash: 1, maxTimeMS: 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- assert.commandWorked(otherSession.abortTransaction_forTesting());
- otherSession.endSession();
- }
-
- {
- const otherSession = primary.startSession({causalConsistency: false});
- const otherDB = otherSession.getDatabase("test");
-
- // We create another collection inside a separate session to modify the collection catalog
- // at an opTime later than 'clusterTime'. This prevents further usage of the snapshot
- // associated with 'clusterTime' for snapshot reads.
- assert.commandWorked(otherDB.runCommand({create: "mycoll2"}));
- assert.commandFailedWithCode(
- db.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}),
- ErrorCodes.SnapshotUnavailable);
-
- otherSession.endSession();
- }
-
- session.endSession();
- rst.stopSet();
+ const hash3 = {collections: res.collections, md5: res.md5};
+ assert.eq(hash1, hash3, "primary returned different dbhash after third insert");
+
+ // However, the "dbHash" command should block behind the transaction if
+ // "$_internalReadAtClusterTime" wasn't specified.
+ res = assert.commandFailedWithCode(db.runCommand({dbHash: 1, maxTimeMS: 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+ assert.commandWorked(otherSession.abortTransaction_forTesting());
+ otherSession.endSession();
+}
+
+{
+ const otherSession = primary.startSession({causalConsistency: false});
+ const otherDB = otherSession.getDatabase("test");
+
+ // We create another collection inside a separate session to modify the collection catalog
+ // at an opTime later than 'clusterTime'. This prevents further usage of the snapshot
+ // associated with 'clusterTime' for snapshot reads.
+ assert.commandWorked(otherDB.runCommand({create: "mycoll2"}));
+ assert.commandFailedWithCode(
+ db.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}),
+ ErrorCodes.SnapshotUnavailable);
+
+ otherSession.endSession();
+}
+
+session.endSession();
+rst.stopSet();
})();
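
The invariant dbhash_read_at_cluster_time.js verifies condenses to one helper: a dbHash taken at a fixed cluster time must be identical across nodes and unaffected by later writes. A minimal sketch under those assumptions ($_internalReadAtClusterTime is the internal, test-only parameter used above; the helper name is illustrative):

    // Run dbHash at 'clusterTime' on 'conn' and return the comparable parts.
    function dbHashAt(conn, dbName, clusterTime) {
        const res = assert.commandWorked(conn.getDB(dbName).runCommand({
            dbHash: 1,
            $_internalReadAtClusterTime: clusterTime,
        }));
        return {collections: res.collections, md5: res.md5};
    }

    // Hashes taken at the same timestamp must agree, regardless of which node
    // serves the read or how many writes have happened since:
    // assert.eq(dbHashAt(primary, "test", clusterTime),
    //           dbHashAt(secondary, "test", clusterTime));
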
diff --git a/jstests/replsets/dbhash_system_collections.js b/jstests/replsets/dbhash_system_collections.js
index c60532dfe1c..d3f7b83c323 100644
--- a/jstests/replsets/dbhash_system_collections.js
+++ b/jstests/replsets/dbhash_system_collections.js
@@ -2,54 +2,54 @@
'use strict';
(function() {
- var rst = new ReplSetTest({name: 'dbhash_system_collections', nodes: 2});
- rst.startSet();
- rst.initiate();
-
- var primary = rst.getPrimary();
- var secondary = rst.getSecondary();
-
- var testDB = primary.getDB('test');
- assert.writeOK(testDB.system.users.insert({users: 1}));
- assert.writeOK(testDB.system.js.insert({js: 1}));
-
- var adminDB = primary.getDB('admin');
- assert.writeOK(adminDB.system.roles.insert({roles: 1}));
- assert.writeOK(adminDB.system.version.insert({version: 1}));
- assert.writeOK(adminDB.system.new_users.insert({new_users: 1}));
- assert.writeOK(adminDB.system.backup_users.insert({backup_users: 1}));
-
- rst.awaitReplication();
-
- function checkDbHash(mongo) {
- var testDB = mongo.getDB('test');
- var adminDB = mongo.getDB('admin');
-
- var replicatedSystemCollections = [
- 'system.js',
- 'system.users',
- ];
-
- var replicatedAdminSystemCollections = [
- 'system.backup_users',
- 'system.new_users',
- 'system.roles',
- 'system.version',
- ];
-
- var res = testDB.runCommand('dbhash');
- assert.commandWorked(res);
- assert.docEq(Object.keys(res.collections), replicatedSystemCollections, tojson(res));
-
- res = adminDB.runCommand('dbhash');
- assert.commandWorked(res);
- assert.docEq(Object.keys(res.collections), replicatedAdminSystemCollections, tojson(res));
-
- return res.md5;
- }
-
- var primaryMd5 = checkDbHash(primary);
- var secondaryMd5 = checkDbHash(secondary);
- assert.eq(primaryMd5, secondaryMd5, 'dbhash is different on the primary and the secondary');
- rst.stopSet();
+var rst = new ReplSetTest({name: 'dbhash_system_collections', nodes: 2});
+rst.startSet();
+rst.initiate();
+
+var primary = rst.getPrimary();
+var secondary = rst.getSecondary();
+
+var testDB = primary.getDB('test');
+assert.writeOK(testDB.system.users.insert({users: 1}));
+assert.writeOK(testDB.system.js.insert({js: 1}));
+
+var adminDB = primary.getDB('admin');
+assert.writeOK(adminDB.system.roles.insert({roles: 1}));
+assert.writeOK(adminDB.system.version.insert({version: 1}));
+assert.writeOK(adminDB.system.new_users.insert({new_users: 1}));
+assert.writeOK(adminDB.system.backup_users.insert({backup_users: 1}));
+
+rst.awaitReplication();
+
+function checkDbHash(mongo) {
+ var testDB = mongo.getDB('test');
+ var adminDB = mongo.getDB('admin');
+
+ var replicatedSystemCollections = [
+ 'system.js',
+ 'system.users',
+ ];
+
+ var replicatedAdminSystemCollections = [
+ 'system.backup_users',
+ 'system.new_users',
+ 'system.roles',
+ 'system.version',
+ ];
+
+ var res = testDB.runCommand('dbhash');
+ assert.commandWorked(res);
+ assert.docEq(Object.keys(res.collections), replicatedSystemCollections, tojson(res));
+
+ res = adminDB.runCommand('dbhash');
+ assert.commandWorked(res);
+ assert.docEq(Object.keys(res.collections), replicatedAdminSystemCollections, tojson(res));
+
+ return res.md5;
+}
+
+var primaryMd5 = checkDbHash(primary);
+var secondaryMd5 = checkDbHash(secondary);
+assert.eq(primaryMd5, secondaryMd5, 'dbhash is different on the primary and the secondary');
+rst.stopSet();
})();
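
The per-collection detail in the dbhash response (the 'collections' field maps collection name to hash, as checked above) also makes it easy to report which collection diverged when the top-level md5 values differ. A sketch of such a diff helper (the function name is illustrative):

    // Return the names of collections whose hashes differ between two nodes.
    function diffCollectionHashes(connA, connB, dbName) {
        const a = assert.commandWorked(connA.getDB(dbName).runCommand('dbhash')).collections;
        const b = assert.commandWorked(connB.getDB(dbName).runCommand('dbhash')).collections;
        return Object.keys(a).filter(name => a[name] !== b[name]);
    }

    // Usage: after rst.awaitReplication(), nothing should differ.
    // assert.eq([], diffCollectionHashes(primary, secondary, 'admin'));
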
diff --git a/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js b/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js
index 8b1dde46cef..2577744902e 100644
--- a/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js
+++ b/jstests/replsets/ddl_op_behind_transaction_fails_in_shutdown.js
@@ -16,102 +16,104 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
- load("jstests/libs/parallel_shell_helpers.js");
- load('jstests/libs/test_background_ops.js');
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+load("jstests/libs/parallel_shell_helpers.js");
+load('jstests/libs/test_background_ops.js');
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const dbName = "test";
- const collName = "ddl_op_behind_prepared_transaction_fails_in_shutdown";
- let primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
- const txnDoc = {_id: 100};
+const dbName = "test";
+const collName = "ddl_op_behind_prepared_transaction_fails_in_shutdown";
+let primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+const txnDoc = {
+ _id: 100
+};
- jsTest.log("Creating a collection '" + collName + "' with data in it...");
- assert.commandWorked(testDB.createCollection(collName));
- let bulk = testColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 2; ++i) {
- bulk.insert({_id: i});
- }
- assert.writeOK(bulk.execute());
+jsTest.log("Creating a collection '" + collName + "' with data in it...");
+assert.commandWorked(testDB.createCollection(collName));
+let bulk = testColl.initializeUnorderedBulkOp();
+for (let i = 0; i < 2; ++i) {
+ bulk.insert({_id: i});
+}
+assert.writeOK(bulk.execute());
- jsTest.log("Setting up a prepared transaction...");
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert(txnDoc));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+jsTest.log("Setting up a prepared transaction...");
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert(txnDoc));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- function runDropCollection(dbName, collName) {
- jsTest.log("Dropping collection in parallel shell...");
- // 'db' is defined in the parallel shell 'startParallelShell' will spin up.
- const res = db.getSiblingDB(dbName).runCommand({drop: collName});
- assert.commandFailedWithCode(
- res,
- [ErrorCodes.InterruptedAtShutdown, ErrorCodes.InterruptedDueToReplStateChange],
- "parallel shell drop cmd completed in an unexpected way: " + tojson(res));
- jsTest.log("Done dropping collection in parallel shell");
- }
+function runDropCollection(dbName, collName) {
+ jsTest.log("Dropping collection in parallel shell...");
+ // 'db' is defined in the parallel shell 'startParallelShell' will spin up.
+ const res = db.getSiblingDB(dbName).runCommand({drop: collName});
+ assert.commandFailedWithCode(
+ res,
+ [ErrorCodes.InterruptedAtShutdown, ErrorCodes.InterruptedDueToReplStateChange],
+ "parallel shell drop cmd completed in an unexpected way: " + tojson(res));
+ jsTest.log("Done dropping collection in parallel shell");
+}
- // Use a failpoint to wait for the drop operation to get as close as possible to a lock request
- // before we release it and wait 1 second more for it to hopefully have time to enqueue a lock
- // request. It takes a while for the parallel shell to start up, establish a connection with the
- // server for the drop operation, etc., and we do not want to interrupt it earlier than lock
- // acquisition with the shutdown signal.
- //
- // This is best-effort, not deterministic, since we cannot place a fail point directly in the
- // locking code as that would hang everything rather than just drop.
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: 'hangDropCollectionBeforeLockAcquisition', mode: 'alwaysOn'}));
- let joinDropCollection;
- try {
- jsTest.log("Starting a parallel shell to concurrently run drop collection...");
- joinDropCollection =
- startParallelShell(funWithArgs(runDropCollection, dbName, collName), primary.port);
+// Use a failpoint to wait for the drop operation to get as close as possible to a lock request
+// before we release it, then wait 1 more second so it hopefully has time to enqueue a lock
+// request. It takes a while for the parallel shell to start up, establish a connection with the
+// server for the drop operation, etc., and we do not want to interrupt it earlier than lock
+// acquisition with the shutdown signal.
+//
+// This is best-effort, not deterministic, since we cannot place a fail point directly in the
+// locking code as that would hang everything rather than just the drop.
+assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: 'hangDropCollectionBeforeLockAcquisition', mode: 'alwaysOn'}));
+let joinDropCollection;
+try {
+ jsTest.log("Starting a parallel shell to concurrently run drop collection...");
+ joinDropCollection =
+ startParallelShell(funWithArgs(runDropCollection, dbName, collName), primary.port);
- jsTest.log("Waiting for drop collection to block behind the prepared transaction...");
- checkLog.contains(
- primary, "Hanging drop collection before lock acquisition while fail point is set");
- } finally {
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: 'hangDropCollectionBeforeLockAcquisition', mode: 'off'}));
- }
- sleep(1 * 1000);
+ jsTest.log("Waiting for drop collection to block behind the prepared transaction...");
+ checkLog.contains(primary,
+ "Hanging drop collection before lock acquisition while fail point is set");
+} finally {
+ assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: 'hangDropCollectionBeforeLockAcquisition', mode: 'off'}));
+}
+sleep(1 * 1000);
- jsTest.log("Restarting the mongod...");
- // Skip validation because it requires a lock that the prepared transaction is blocking.
- rst.stop(primary, undefined, {skipValidation: true});
- rst.start(primary, {}, true /*restart*/);
- primary = rst.getPrimary();
+jsTest.log("Restarting the mongod...");
+// Skip validation because it requires a lock that the prepared transaction is blocking.
+rst.stop(primary, undefined, {skipValidation: true});
+rst.start(primary, {}, true /*restart*/);
+primary = rst.getPrimary();
- joinDropCollection();
+joinDropCollection();
- const numDocs = primary.getDB(dbName).getCollection(collName).find().length();
- // We expect two documents because the third is in an uncommitted transaction and not visible.
- assert.eq(
- 2,
- numDocs,
- "Expected '" + collName + "' to find 2 documents, found " + numDocs +
- ". Drop collection may have succeeded during shutdown while a transaction was in the " +
- "prepared state.");
+const numDocs = primary.getDB(dbName).getCollection(collName).find().length();
+// We expect two documents because the third is in an uncommitted transaction and not visible.
+assert.eq(
+ 2,
+ numDocs,
+ "Expected '" + collName + "' to find 2 documents, found " + numDocs +
+ ". Drop collection may have succeeded during shutdown while a transaction was in the " +
+ "prepared state.");
- // We will check that the prepared transaction is still active as expected, since we are here.
- assert.commandFailedWithCode(primary.getDB(dbName).runCommand({
- find: collName,
- filter: txnDoc,
- readConcern: {afterClusterTime: prepareTimestamp},
- maxTimeMS: 5000
- }),
- ErrorCodes.MaxTimeMSExpired);
+// While we are here, also check that the prepared transaction is still active as expected.
+assert.commandFailedWithCode(primary.getDB(dbName).runCommand({
+ find: collName,
+ filter: txnDoc,
+ readConcern: {afterClusterTime: prepareTimestamp},
+ maxTimeMS: 5000
+}),
+ ErrorCodes.MaxTimeMSExpired);
- // Skip validation because it requires a lock that the prepared transaction is blocking.
- rst.stopSet(true /*use default exit signal*/, false /*forRestart*/, {skipValidation: true});
+// Skip validation because it requires a lock that the prepared transaction is blocking.
+rst.stopSet(true /*use default exit signal*/, false /*forRestart*/, {skipValidation: true});
})();
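
The enable/try/finally/disable dance around hangDropCollectionBeforeLockAcquisition is a general fail point pattern: the finally block guarantees the server is never left hanging if the log wait throws. A minimal sketch of it as a helper, assuming a shell test environment (the helper name is illustrative):

    // Run 'fn' with 'failPointName' enabled on 'conn', disabling the fail
    // point afterwards even if 'fn' throws.
    function withFailPoint(conn, failPointName, fn) {
        assert.commandWorked(
            conn.adminCommand({configureFailPoint: failPointName, mode: 'alwaysOn'}));
        try {
            fn();
        } finally {
            assert.commandWorked(
                conn.adminCommand({configureFailPoint: failPointName, mode: 'off'}));
        }
    }

    // Usage, mirroring the block above:
    // withFailPoint(primary, 'hangDropCollectionBeforeLockAcquisition', () => {
    //     joinDropCollection =
    //         startParallelShell(funWithArgs(runDropCollection, dbName, collName), primary.port);
    //     checkLog.contains(
    //         primary, 'Hanging drop collection before lock acquisition while fail point is set');
    // });
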
diff --git a/jstests/replsets/ddl_ops_after_prepare_lock_failpoint.js b/jstests/replsets/ddl_ops_after_prepare_lock_failpoint.js
index a0d19c93b57..db2a693a3cb 100644
--- a/jstests/replsets/ddl_ops_after_prepare_lock_failpoint.js
+++ b/jstests/replsets/ddl_ops_after_prepare_lock_failpoint.js
@@ -8,127 +8,130 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/get_index_helpers.js");
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const dbName = "test";
- const collName = "ddl_ops_after_prepare_lock_failpoint";
- const indexName = "test_index";
-
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- // Create the collection we will be working with.
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- // Also build an index (on the same collection) which we will later attempt to drop.
- assert.commandWorked(testDB.runCommand(
- {createIndexes: collName, indexes: [{key: {"num": 1}, name: indexName}]}));
-
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 42}));
-
- PrepareHelpers.prepareTransaction(session);
-
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "alwaysOn"}));
-
- /**
- * Tests that conflicting DDL ops fail immediately.
- */
-
- // Collection names for DDL ops that will fail.
- const collToDrop = collName;
- const collToRenameFrom = collName;
- const collToRenameTo = "rename_collection_to_fail";
- const indexToCreate = "create_index_to_fail";
- const indexToDrop = indexName;
-
- let testDDLOps = () => {
- // Also attempt to delete our original collection (it is in conflict anyway, but should
- // fail to acquire the db lock in the first place).
- assert.throws(function() {
- testDB.getCollection(collToDrop).drop();
- });
- assert(testDB.getCollectionNames().includes(collToDrop));
-
- // Same goes for trying to rename it.
- assert.commandFailedWithCode(
- testDB.getCollection(collToRenameFrom).renameCollection(collToRenameTo),
- ErrorCodes.LockTimeout);
- assert(testDB.getCollectionNames().includes(collToRenameFrom));
- assert(!testDB.getCollectionNames().includes(collToRenameTo));
-
- assert.commandFailedWithCode(testDB.adminCommand({
- renameCollection: testDB.getCollection(collToRenameFrom).getFullName(),
- to: testDB.getSiblingDB('test2').getCollection(collToRenameTo).getFullName(),
- }),
- ErrorCodes.LockTimeout);
-
- // Attempt to add a new index to that collection.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {createIndexes: collName, indexes: [{key: {"b": 1}, name: indexToCreate}]}),
- ErrorCodes.LockTimeout);
- assert.eq(null, GetIndexHelpers.findByName(testColl.getIndexes(), indexToCreate));
-
- // Try dropping the index we created originally. This should also fail.
- assert.commandFailedWithCode(testDB.runCommand({dropIndexes: collName, index: indexToDrop}),
- ErrorCodes.LockTimeout);
- assert.neq(null, GetIndexHelpers.findByName(testColl.getIndexes(), indexToDrop));
- };
-
- /**
- * Tests that CRUD operations on the same collection succeed.
- */
-
- const docToInsert = {num: 100};
- const docToUpdateFrom = docToInsert;
- const docToUpdateTo = {num: 101};
- const docToRemove = docToUpdateTo;
-
- let testCRUDOps = (collConn) => {
- // TODO: SERVER-40167 Having an extra document in the collection is necessary to avoid
- // prepare conflicts when deleting documents.
- assert.commandWorked(collConn.insert({num: 1}));
-
- assert.commandWorked(collConn.insert(docToInsert));
- assert.eq(100, collConn.findOne(docToInsert).num);
-
- // This will not encounter a prepare conflict because there is an index on "num" that
- // eliminates the need for using a collection scan.
- assert.commandWorked(collConn.update(docToUpdateFrom, docToUpdateTo));
- assert.eq(101, collConn.findOne(docToUpdateTo).num);
-
- assert.commandWorked(collConn.remove(docToRemove));
- assert.eq(null, collConn.findOne(docToUpdateFrom));
- assert.eq(null, collConn.findOne(docToUpdateTo));
- };
-
- // First test DDL ops (should fail).
- testDDLOps();
-
- // Then test operations outside of transactions (should succeed).
- testCRUDOps(testColl);
-
- // Also test operations as part of a transaction (should succeed).
- testCRUDOps(primary.startSession({causalConsistency: false})
- .getDatabase(dbName)
- .getCollection(collName));
-
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"}));
-
- assert.commandWorked(session.abortTransaction_forTesting());
- rst.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/get_index_helpers.js");
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const dbName = "test";
+const collName = "ddl_ops_after_prepare_lock_failpoint";
+const indexName = "test_index";
+
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+// Create the collection we will be working with.
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+// Also build an index (on the same collection) which we will later attempt to drop.
+assert.commandWorked(
+ testDB.runCommand({createIndexes: collName, indexes: [{key: {"num": 1}, name: indexName}]}));
+
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 42}));
+
+PrepareHelpers.prepareTransaction(session);
+
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "alwaysOn"}));
+
+/**
+ * Tests that conflicting DDL ops fail immediately.
+ */
+
+// Collection names for DDL ops that will fail.
+const collToDrop = collName;
+const collToRenameFrom = collName;
+const collToRenameTo = "rename_collection_to_fail";
+const indexToCreate = "create_index_to_fail";
+const indexToDrop = indexName;
+
+let testDDLOps = () => {
+    // Attempt to drop our original collection (it is in conflict anyway, but should
+    // fail to acquire the db lock in the first place).
+ assert.throws(function() {
+ testDB.getCollection(collToDrop).drop();
+ });
+ assert(testDB.getCollectionNames().includes(collToDrop));
+
+ // Same goes for trying to rename it.
+ assert.commandFailedWithCode(
+ testDB.getCollection(collToRenameFrom).renameCollection(collToRenameTo),
+ ErrorCodes.LockTimeout);
+ assert(testDB.getCollectionNames().includes(collToRenameFrom));
+ assert(!testDB.getCollectionNames().includes(collToRenameTo));
+
+ assert.commandFailedWithCode(testDB.adminCommand({
+ renameCollection: testDB.getCollection(collToRenameFrom).getFullName(),
+ to: testDB.getSiblingDB('test2').getCollection(collToRenameTo).getFullName(),
+ }),
+ ErrorCodes.LockTimeout);
+
+ // Attempt to add a new index to that collection.
+ assert.commandFailedWithCode(
+ testDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {"b": 1}, name: indexToCreate}]}),
+ ErrorCodes.LockTimeout);
+ assert.eq(null, GetIndexHelpers.findByName(testColl.getIndexes(), indexToCreate));
+
+ // Try dropping the index we created originally. This should also fail.
+ assert.commandFailedWithCode(testDB.runCommand({dropIndexes: collName, index: indexToDrop}),
+ ErrorCodes.LockTimeout);
+ assert.neq(null, GetIndexHelpers.findByName(testColl.getIndexes(), indexToDrop));
+};
+
+/**
+ * Tests that CRUD operations on the same collection succeed.
+ */
+
+const docToInsert = {
+ num: 100
+};
+const docToUpdateFrom = docToInsert;
+const docToUpdateTo = {
+ num: 101
+};
+const docToRemove = docToUpdateTo;
+
+let testCRUDOps = (collConn) => {
+ // TODO: SERVER-40167 Having an extra document in the collection is necessary to avoid
+ // prepare conflicts when deleting documents.
+ assert.commandWorked(collConn.insert({num: 1}));
+
+ assert.commandWorked(collConn.insert(docToInsert));
+ assert.eq(100, collConn.findOne(docToInsert).num);
+
+ // This will not encounter a prepare conflict because there is an index on "num" that
+ // eliminates the need for using a collection scan.
+ assert.commandWorked(collConn.update(docToUpdateFrom, docToUpdateTo));
+ assert.eq(101, collConn.findOne(docToUpdateTo).num);
+
+ assert.commandWorked(collConn.remove(docToRemove));
+ assert.eq(null, collConn.findOne(docToUpdateFrom));
+ assert.eq(null, collConn.findOne(docToUpdateTo));
+};
+
+// First test DDL ops (should fail).
+testDDLOps();
+
+// Then test operations outside of transactions (should succeed).
+testCRUDOps(testColl);
+
+// Also test operations as part of a transaction (should succeed).
+testCRUDOps(
+ primary.startSession({causalConsistency: false}).getDatabase(dbName).getCollection(collName));
+
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "failNonIntentLocksIfWaitNeeded", mode: "off"}));
+
+assert.commandWorked(session.abortTransaction_forTesting());
+rst.stopSet();
})();
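
The property this test checks, that with failNonIntentLocksIfWaitNeeded enabled a conflicting DDL op fails fast with LockTimeout instead of blocking behind the prepared transaction, can be sketched as a single assertion helper (the name and the time bound are illustrative):

    // Assert that 'cmd' fails with LockTimeout without sitting in the lock queue.
    function assertDDLFailsFast(db, cmd) {
        const start = Date.now();
        assert.commandFailedWithCode(db.runCommand(cmd), ErrorCodes.LockTimeout);
        // Generous bound: the point is only that the op did not wait out a lock timeout.
        assert.lt(Date.now() - start, 60 * 1000, "DDL op appears to have waited: " + tojson(cmd));
    }

    // Usage:
    // assertDDLFailsFast(testDB, {dropIndexes: collName, index: indexName});
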
diff --git a/jstests/replsets/disallow_adding_initialized_node1.js b/jstests/replsets/disallow_adding_initialized_node1.js
index 910e71c7d8c..7123070bf92 100644
--- a/jstests/replsets/disallow_adding_initialized_node1.js
+++ b/jstests/replsets/disallow_adding_initialized_node1.js
@@ -5,79 +5,78 @@
// detecting an inconsistent replica set ID in the heartbeat response metadata from B_0.
(function() {
- 'use strict';
- load("jstests/libs/check_log.js");
+'use strict';
+load("jstests/libs/check_log.js");
- var name = 'disallow_adding_initialized_node1';
- var replSetA = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {_id: 10}},
- ]
- });
- replSetA.startSet({dbpath: "$set-A-$node"});
- replSetA.initiate();
+var name = 'disallow_adding_initialized_node1';
+var replSetA = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 10}},
+ ]
+});
+replSetA.startSet({dbpath: "$set-A-$node"});
+replSetA.initiate();
- var replSetB = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {_id: 20}},
- ]
- });
- replSetB.startSet({dbpath: "$set-B-$node"});
- replSetB.initiate();
+var replSetB = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 20}},
+ ]
+});
+replSetB.startSet({dbpath: "$set-B-$node"});
+replSetB.initiate();
- var primaryA = replSetA.getPrimary();
- var primaryB = replSetB.getPrimary();
- jsTestLog('Before merging: primary A = ' + primaryA.host + '; primary B = ' + primaryB.host);
+var primaryA = replSetA.getPrimary();
+var primaryB = replSetB.getPrimary();
+jsTestLog('Before merging: primary A = ' + primaryA.host + '; primary B = ' + primaryB.host);
- var configA = assert.commandWorked(primaryA.adminCommand({replSetGetConfig: 1})).config;
- var configB = assert.commandWorked(primaryB.adminCommand({replSetGetConfig: 1})).config;
- assert(configA.settings.replicaSetId instanceof ObjectId);
- assert(configB.settings.replicaSetId instanceof ObjectId);
- jsTestLog('Replica set A ID = ' + configA.settings.replicaSetId);
- jsTestLog('Replica set B ID = ' + configB.settings.replicaSetId);
- assert.neq(configA.settings.replicaSetId, configB.settings.replicaSetId);
+var configA = assert.commandWorked(primaryA.adminCommand({replSetGetConfig: 1})).config;
+var configB = assert.commandWorked(primaryB.adminCommand({replSetGetConfig: 1})).config;
+assert(configA.settings.replicaSetId instanceof ObjectId);
+assert(configB.settings.replicaSetId instanceof ObjectId);
+jsTestLog('Replica set A ID = ' + configA.settings.replicaSetId);
+jsTestLog('Replica set B ID = ' + configB.settings.replicaSetId);
+assert.neq(configA.settings.replicaSetId, configB.settings.replicaSetId);
- jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config");
- configA.version++;
- configA.members.push({_id: 11, host: primaryB.host});
- var reconfigResult =
- assert.commandFailedWithCode(primaryA.adminCommand({replSetReconfig: configA}),
- ErrorCodes.NewReplicaSetConfigurationIncompatible);
- var msgA = 'Our replica set ID of ' + configA.settings.replicaSetId +
- ' did not match that of ' + primaryB.host + ', which is ' + configB.settings.replicaSetId;
- assert.neq(-1, reconfigResult.errmsg.indexOf(msgA));
+jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config");
+configA.version++;
+configA.members.push({_id: 11, host: primaryB.host});
+var reconfigResult =
+ assert.commandFailedWithCode(primaryA.adminCommand({replSetReconfig: configA}),
+ ErrorCodes.NewReplicaSetConfigurationIncompatible);
+var msgA = 'Our replica set ID of ' + configA.settings.replicaSetId + ' did not match that of ' +
+ primaryB.host + ', which is ' + configB.settings.replicaSetId;
+assert.neq(-1, reconfigResult.errmsg.indexOf(msgA));
- var newPrimaryA = replSetA.getPrimary();
- var newPrimaryB = replSetB.getPrimary();
- jsTestLog('After merging: primary A = ' + newPrimaryA.host + '; primary B = ' +
- newPrimaryB.host);
- assert.eq(primaryA, newPrimaryA);
- assert.eq(primaryB, newPrimaryB);
+var newPrimaryA = replSetA.getPrimary();
+var newPrimaryB = replSetB.getPrimary();
+jsTestLog('After merging: primary A = ' + newPrimaryA.host + '; primary B = ' + newPrimaryB.host);
+assert.eq(primaryA, newPrimaryA);
+assert.eq(primaryB, newPrimaryB);
- // Mismatch replica set IDs in heartbeat responses should be logged.
- var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
- "; remote node's: " + configA.settings.replicaSetId;
- checkLog.contains(primaryB, msgB);
+// Mismatched replica set IDs in heartbeat responses should be logged.
+var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
+ "; remote node's: " + configA.settings.replicaSetId;
+checkLog.contains(primaryB, msgB);
- var statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
- var statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
- jsTestLog('After merging: replica set status A = ' + tojson(statusA));
- jsTestLog('After merging: replica set status B = ' + tojson(statusB));
+var statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
+var statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
+jsTestLog('After merging: replica set status A = ' + tojson(statusA));
+jsTestLog('After merging: replica set status B = ' + tojson(statusB));
- // Replica set A's config should remain unchanged due to failed replSetReconfig command.
- assert.eq(1, statusA.members.length);
- assert.eq(10, statusA.members[0]._id);
- assert.eq(primaryA.host, statusA.members[0].name);
- assert.eq(ReplSetTest.State.PRIMARY, statusA.members[0].state);
+// Replica set A's config should remain unchanged due to failed replSetReconfig command.
+assert.eq(1, statusA.members.length);
+assert.eq(10, statusA.members[0]._id);
+assert.eq(primaryA.host, statusA.members[0].name);
+assert.eq(ReplSetTest.State.PRIMARY, statusA.members[0].state);
- // Replica set B's config should remain unchanged.
- assert.eq(1, statusB.members.length);
- assert.eq(20, statusB.members[0]._id);
- assert.eq(primaryB.host, statusB.members[0].name);
- assert.eq(ReplSetTest.State.PRIMARY, statusB.members[0].state);
+// Replica set B's config should remain unchanged.
+assert.eq(1, statusB.members.length);
+assert.eq(20, statusB.members[0]._id);
+assert.eq(primaryB.host, statusB.members[0].name);
+assert.eq(ReplSetTest.State.PRIMARY, statusB.members[0].state);
- replSetB.stopSet();
- replSetA.stopSet();
+replSetB.stopSet();
+replSetA.stopSet();
})();
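
The reconfig rejection above boils down to comparing settings.replicaSetId between the two configs. A pre-flight check with the same logic might look like this sketch (the helper name is illustrative):

    // Fail early if 'candidate' belongs to a replica set with a different ID.
    function assertSameReplicaSetId(ourPrimary, candidate) {
        const ours = assert.commandWorked(ourPrimary.adminCommand({replSetGetConfig: 1}))
                         .config.settings.replicaSetId;
        const theirs = assert.commandWorked(candidate.adminCommand({replSetGetConfig: 1}))
                           .config.settings.replicaSetId;
        assert.eq(ours, theirs,
                  'replica set IDs differ: ours ' + ours + ', theirs ' + theirs);
    }
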
diff --git a/jstests/replsets/disallow_adding_initialized_node2.js b/jstests/replsets/disallow_adding_initialized_node2.js
index e92fc77880b..12de734b411 100644
--- a/jstests/replsets/disallow_adding_initialized_node2.js
+++ b/jstests/replsets/disallow_adding_initialized_node2.js
@@ -10,87 +10,86 @@
// @tags: [requires_persistence]
(function() {
- 'use strict';
- load("jstests/libs/check_log.js");
+'use strict';
+load("jstests/libs/check_log.js");
- var name = 'disallow_adding_initialized_node2';
- var replSetA = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {_id: 10}},
- {rsConfig: {_id: 11, arbiterOnly: true}},
- ]
- });
- replSetA.startSet({dbpath: "$set-A-$node"});
- replSetA.initiate();
+var name = 'disallow_adding_initialized_node2';
+var replSetA = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 10}},
+ {rsConfig: {_id: 11, arbiterOnly: true}},
+ ]
+});
+replSetA.startSet({dbpath: "$set-A-$node"});
+replSetA.initiate();
- var replSetB = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {_id: 20}},
- ]
- });
- replSetB.startSet({dbpath: "$set-B-$node"});
- replSetB.initiate();
+var replSetB = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 20}},
+ ]
+});
+replSetB.startSet({dbpath: "$set-B-$node"});
+replSetB.initiate();
- var primaryA = replSetA.getPrimary();
- var primaryB = replSetB.getPrimary();
- jsTestLog('Before merging: primary A = ' + primaryA.host + '; primary B = ' + primaryB.host);
+var primaryA = replSetA.getPrimary();
+var primaryB = replSetB.getPrimary();
+jsTestLog('Before merging: primary A = ' + primaryA.host + '; primary B = ' + primaryB.host);
- var configA = assert.commandWorked(primaryA.adminCommand({replSetGetConfig: 1})).config;
- var configB = assert.commandWorked(primaryB.adminCommand({replSetGetConfig: 1})).config;
- assert(configA.settings.replicaSetId instanceof ObjectId);
- assert(configB.settings.replicaSetId instanceof ObjectId);
- jsTestLog('Replica set A ID = ' + configA.settings.replicaSetId);
- jsTestLog('Replica set B ID = ' + configB.settings.replicaSetId);
- assert.neq(configA.settings.replicaSetId, configB.settings.replicaSetId);
+var configA = assert.commandWorked(primaryA.adminCommand({replSetGetConfig: 1})).config;
+var configB = assert.commandWorked(primaryB.adminCommand({replSetGetConfig: 1})).config;
+assert(configA.settings.replicaSetId instanceof ObjectId);
+assert(configB.settings.replicaSetId instanceof ObjectId);
+jsTestLog('Replica set A ID = ' + configA.settings.replicaSetId);
+jsTestLog('Replica set B ID = ' + configB.settings.replicaSetId);
+assert.neq(configA.settings.replicaSetId, configB.settings.replicaSetId);
- jsTestLog("Stopping B's primary " + primaryB.host);
- replSetB.stop(0);
+jsTestLog("Stopping B's primary " + primaryB.host);
+replSetB.stop(0);
- jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config");
- configA.version++;
- configA.members.push({_id: 12, host: primaryB.host});
- assert.commandWorked(primaryA.adminCommand({replSetReconfig: configA}));
+jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config");
+configA.version++;
+configA.members.push({_id: 12, host: primaryB.host});
+assert.commandWorked(primaryA.adminCommand({replSetReconfig: configA}));
- jsTestLog("Restarting B's primary " + primaryB.host);
- primaryB = replSetB.start(0, {dbpath: "$set-B-$node", restart: true});
+jsTestLog("Restarting B's primary " + primaryB.host);
+primaryB = replSetB.start(0, {dbpath: "$set-B-$node", restart: true});
- var newPrimaryA = replSetA.getPrimary();
- var newPrimaryB = replSetB.getPrimary();
- jsTestLog('After merging: primary A = ' + newPrimaryA.host + '; primary B = ' +
- newPrimaryB.host);
- assert.eq(primaryA, newPrimaryA);
- assert.eq(primaryB, newPrimaryB);
+var newPrimaryA = replSetA.getPrimary();
+var newPrimaryB = replSetB.getPrimary();
+jsTestLog('After merging: primary A = ' + newPrimaryA.host + '; primary B = ' + newPrimaryB.host);
+assert.eq(primaryA, newPrimaryA);
+assert.eq(primaryB, newPrimaryB);
- // Mismatch replica set IDs in heartbeat responses should be logged.
- var msgA = "replica set IDs do not match, ours: " + configA.settings.replicaSetId +
- "; remote node's: " + configB.settings.replicaSetId;
- var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
- "; remote node's: " + configA.settings.replicaSetId;
- checkLog.contains(primaryA, msgA);
- checkLog.contains(primaryB, msgB);
+// Mismatched replica set IDs in heartbeat responses should be logged.
+var msgA = "replica set IDs do not match, ours: " + configA.settings.replicaSetId +
+ "; remote node's: " + configB.settings.replicaSetId;
+var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
+ "; remote node's: " + configA.settings.replicaSetId;
+checkLog.contains(primaryA, msgA);
+checkLog.contains(primaryB, msgB);
- var statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
- var statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
- jsTestLog('After merging: replica set status A = ' + tojson(statusA));
- jsTestLog('After merging: replica set status B = ' + tojson(statusB));
+var statusA = assert.commandWorked(primaryA.adminCommand({replSetGetStatus: 1}));
+var statusB = assert.commandWorked(primaryB.adminCommand({replSetGetStatus: 1}));
+jsTestLog('After merging: replica set status A = ' + tojson(statusA));
+jsTestLog('After merging: replica set status B = ' + tojson(statusB));
- // B's primary should show up in A's status as DOWN.
- assert.eq(3, statusA.members.length);
- assert.eq(10, statusA.members[0]._id);
- assert.eq(primaryA.host, statusA.members[0].name);
- assert.eq(ReplSetTest.State.PRIMARY, statusA.members[0].state);
- assert.eq(12, statusA.members[2]._id);
- assert.eq(primaryB.host, statusA.members[2].name);
- assert.eq(ReplSetTest.State.DOWN, statusA.members[2].state);
+// B's primary should show up in A's status as DOWN.
+assert.eq(3, statusA.members.length);
+assert.eq(10, statusA.members[0]._id);
+assert.eq(primaryA.host, statusA.members[0].name);
+assert.eq(ReplSetTest.State.PRIMARY, statusA.members[0].state);
+assert.eq(12, statusA.members[2]._id);
+assert.eq(primaryB.host, statusA.members[2].name);
+assert.eq(ReplSetTest.State.DOWN, statusA.members[2].state);
- // Replica set B's config should remain unchanged.
- assert.eq(1, statusB.members.length);
- assert.eq(20, statusB.members[0]._id);
- assert.eq(primaryB.host, statusB.members[0].name);
- assert.eq(ReplSetTest.State.PRIMARY, statusB.members[0].state);
+// Replica set B's config should remain unchanged.
+assert.eq(1, statusB.members.length);
+assert.eq(20, statusB.members[0]._id);
+assert.eq(primaryB.host, statusB.members[0].name);
+assert.eq(ReplSetTest.State.PRIMARY, statusB.members[0].state);
- replSetB.stopSet();
- replSetA.stopSet();
+replSetB.stopSet();
+replSetA.stopSet();
})();
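
Both status checks at the end follow the same shape: find a member by _id in replSetGetStatus and assert its state. A small sketch of that lookup (the helper name is illustrative):

    // Assert that member '_id' appears in replSetGetStatus with 'expectedState'.
    function assertMemberState(conn, memberId, expectedState) {
        const status = assert.commandWorked(conn.adminCommand({replSetGetStatus: 1}));
        const member = status.members.find(m => m._id === memberId);
        assert(member, 'no member with _id ' + memberId + ' in ' + tojson(status));
        assert.eq(expectedState, member.state, tojson(member));
    }

    // Usage: B's primary, added as member 12, shows up as DOWN from A's side.
    // assertMemberState(primaryA, 12, ReplSetTest.State.DOWN);
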
diff --git a/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js b/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js
index 12ebc3eb40d..bb16e3966b9 100644
--- a/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js
+++ b/jstests/replsets/disallow_shardsvr_transactions_wcMajorityJournal_false.js
@@ -6,47 +6,47 @@
*/
(function() {
- "use strict";
-
- // A testing exemption was made to allow transactions on shard server even if
- // writeConcernMajorityJournalDefault = false. So we need to disable the exemption in this test
- // in order to test the behavior.
- jsTest.setOption('enableTestCommands', false);
-
- // The following two options by default do not support enableTestCommands=false, change them
- // accordingly so this test can run.
- TestData.roleGraphInvalidationIsFatal = false;
- TestData.authenticationDatabase = "local";
-
- // Start the replica set with --shardsvr.
- const replSet = new ReplSetTest({nodes: 1, nodeOptions: {shardsvr: ""}});
- replSet.startSet();
- let conf = replSet.getReplSetConfig();
- conf.writeConcernMajorityJournalDefault = false;
- replSet.initiate(conf);
-
- const primary = replSet.getPrimary();
- const session = primary.startSession();
- const sessionDb = session.getDatabase("test");
- const sessionColl = sessionDb.getCollection("foo");
-
- jsTestLog("Test that non-transactional operations are allowed.");
- assert.commandWorked(sessionColl.insert({_id: 1}));
-
- jsTestLog("Test that transactions are not allowed.");
- session.startTransaction();
- assert.commandFailedWithCode(sessionColl.insert({_id: 2}),
- ErrorCodes.OperationNotSupportedInTransaction);
- // All commands are not allowed including abortTransaction.
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.OperationNotSupportedInTransaction);
-
- jsTestLog("Test that retryable writes are allowed.");
- assert.commandWorked(
- sessionDb.runCommand({insert: "foo", documents: [{_id: 3}], txnNumber: NumberLong(1)}));
-
- // Assert documents inserted.
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 1}, {_id: 3}]);
-
- replSet.stopSet();
+"use strict";
+
+// A testing exemption was made to allow transactions on shard servers even if
+// writeConcernMajorityJournalDefault = false, so we need to disable the exemption in this test
+// in order to test the behavior.
+jsTest.setOption('enableTestCommands', false);
+
+// The following two options do not support enableTestCommands=false by default; change them
+// accordingly so this test can run.
+TestData.roleGraphInvalidationIsFatal = false;
+TestData.authenticationDatabase = "local";
+
+// Start the replica set with --shardsvr.
+const replSet = new ReplSetTest({nodes: 1, nodeOptions: {shardsvr: ""}});
+replSet.startSet();
+let conf = replSet.getReplSetConfig();
+conf.writeConcernMajorityJournalDefault = false;
+replSet.initiate(conf);
+
+const primary = replSet.getPrimary();
+const session = primary.startSession();
+const sessionDb = session.getDatabase("test");
+const sessionColl = sessionDb.getCollection("foo");
+
+jsTestLog("Test that non-transactional operations are allowed.");
+assert.commandWorked(sessionColl.insert({_id: 1}));
+
+jsTestLog("Test that transactions are not allowed.");
+session.startTransaction();
+assert.commandFailedWithCode(sessionColl.insert({_id: 2}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+// No commands are allowed, including abortTransaction.
+assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.OperationNotSupportedInTransaction);
+
+jsTestLog("Test that retryable writes are allowed.");
+assert.commandWorked(
+ sessionDb.runCommand({insert: "foo", documents: [{_id: 3}], txnNumber: NumberLong(1)}));
+
+// Assert documents inserted.
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 1}, {_id: 3}]);
+
+replSet.stopSet();
}());
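
The distinction the test exercises is visible on the wire: a retryable write carries only lsid and txnNumber, while a transactional statement additionally sends startTransaction and autocommit, and only the latter is rejected here. A sketch of the two command shapes (field values are illustrative; the session machinery normally fills in lsid):

    // Retryable write: allowed even with writeConcernMajorityJournalDefault = false.
    const retryableWrite = {
        insert: 'foo',
        documents: [{_id: 4}],
        txnNumber: NumberLong(2),  // retried with the same number on failure
    };

    // Transactional statement: rejected with OperationNotSupportedInTransaction.
    const transactionalWrite = {
        insert: 'foo',
        documents: [{_id: 5}],
        txnNumber: NumberLong(3),
        startTransaction: true,
        autocommit: false,
    };

    // assert.commandWorked(sessionDb.runCommand(retryableWrite));
    // assert.commandFailedWithCode(sessionDb.runCommand(transactionalWrite),
    //                              ErrorCodes.OperationNotSupportedInTransaction);
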
diff --git a/jstests/replsets/disconnect_on_legacy_write_to_secondary.js b/jstests/replsets/disconnect_on_legacy_write_to_secondary.js
index 1afa8ba0f33..9a5474e190c 100644
--- a/jstests/replsets/disconnect_on_legacy_write_to_secondary.js
+++ b/jstests/replsets/disconnect_on_legacy_write_to_secondary.js
@@ -2,107 +2,106 @@
* Tests that legacy writes to secondaries result in no answer and a disconnection.
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const collname = "disconnect_on_legacy_write_to_secondary";
- const coll = primary.getDB("test")[collname];
- const secondaryDb = secondary.getDB("test");
- const secondaryColl = secondaryDb[collname];
-
- // Never retry on network error, because this test needs to detect the network error.
- TestData.skipRetryOnNetworkError = true;
- secondary.forceWriteMode('legacy');
- assert.commandWorked(coll.insert([{_id: 'deleteme'}, {_id: 'updateme'}]));
- rst.awaitReplication();
-
- jsTestLog("Trying legacy insert on secondary");
- secondaryColl.insert({_id: 'no_insert_on_secondary'});
- let res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
+"use strict";
+
+load("jstests/libs/check_log.js");
+
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const collname = "disconnect_on_legacy_write_to_secondary";
+const coll = primary.getDB("test")[collname];
+const secondaryDb = secondary.getDB("test");
+const secondaryColl = secondaryDb[collname];
+
+// Never retry on network error, because this test needs to detect the network error.
+TestData.skipRetryOnNetworkError = true;
+secondary.forceWriteMode('legacy');
+assert.commandWorked(coll.insert([{_id: 'deleteme'}, {_id: 'updateme'}]));
+rst.awaitReplication();
+
+jsTestLog("Trying legacy insert on secondary");
+secondaryColl.insert({_id: 'no_insert_on_secondary'});
+let res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
+assert(isNetworkError(res));
+// We should automatically reconnect after the failed command.
+assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
+
+jsTestLog("Trying legacy update on secondary");
+secondaryColl.update({_id: 'updateme'}, {'$set': {x: 1}});
+res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
+assert(isNetworkError(res));
+// We should automatically reconnect after the failed command.
+assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
+
+jsTestLog("Trying legacy remove on secondary");
+secondaryColl.remove({_id: 'deleteme'});
+res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
+assert(isNetworkError(res));
+// We should automatically reconnect after the failed command.
+assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
+
+// Do the stepdown tests on a separate connection to avoid interfering with the
+// ReplSetTest mechanism.
+const primaryAdmin = primary.getDB("admin");
+const primaryDataConn = new Mongo(primary.host);
+const primaryDb = primaryDataConn.getDB("test");
+const primaryColl = primaryDb[collname];
+primaryDataConn.forceWriteMode('legacy');
+
+function getNotMasterLegacyUnackWritesCounter() {
+ return assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1}))
+ .metrics.repl.network.notMasterLegacyUnacknowledgedWrites;
+}
+
+function runStepDownTest({description, failpoint, operation}) {
+ jsTestLog("Enabling failpoint to block " + description + "s");
+ assert.commandWorked(
+ primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+
+ let failedLegacyUnackWritesBefore = getNotMasterLegacyUnackWritesCounter();
+
+ jsTestLog("Trying legacy " + description + " on stepping-down primary");
+ operation();
+ checkLog.contains(primary, failpoint + " fail point enabled");
+ jsTestLog("Within " + description + ": stepping down and disabling failpoint");
+ assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+ assert.commandWorked(primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+ res = assert.throws(() => primaryDb.adminCommand({ping: 1}));
assert(isNetworkError(res));
// We should automatically reconnect after the failed command.
- assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
-
- jsTestLog("Trying legacy update on secondary");
- secondaryColl.update({_id: 'updateme'}, {'$set': {x: 1}});
- res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
- assert(isNetworkError(res));
- // We should automatically reconnect after the failed command.
- assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
-
- jsTestLog("Trying legacy remove on secondary");
- secondaryColl.remove({_id: 'deleteme'}, {'$set': {x: 1}});
- res = assert.throws(() => secondaryDb.adminCommand({ping: 1}));
- assert(isNetworkError(res));
- // We should automatically reconnect after the failed command.
- assert.commandWorked(secondaryDb.adminCommand({ping: 1}));
-
- // Do the stepdown tests on a separate connection to avoid interfering with the
- // ReplSetTest mechanism.
- const primaryAdmin = primary.getDB("admin");
- const primaryDataConn = new Mongo(primary.host);
- const primaryDb = primaryDataConn.getDB("test");
- const primaryColl = primaryDb[collname];
- primaryDataConn.forceWriteMode('legacy');
-
- function getNotMasterLegacyUnackWritesCounter() {
- return assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1}))
- .metrics.repl.network.notMasterLegacyUnacknowledgedWrites;
- }
-
- function runStepDownTest({description, failpoint, operation}) {
- jsTestLog("Enabling failpoint to block " + description + "s");
- assert.commandWorked(
- primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
-
- let failedLegacyUnackWritesBefore = getNotMasterLegacyUnackWritesCounter();
-
- jsTestLog("Trying legacy " + description + " on stepping-down primary");
- operation();
- checkLog.contains(primary, failpoint + " fail point enabled");
- jsTestLog("Within " + description + ": stepping down and disabling failpoint");
- assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- assert.commandWorked(
- primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- res = assert.throws(() => primaryDb.adminCommand({ping: 1}));
- assert(isNetworkError(res));
- // We should automatically reconnect after the failed command.
- assert.commandWorked(primaryDb.adminCommand({ping: 1}));
-
- // Validate the number of legacy unacknowledged writes failed due to step down resulted
- // in network disconnection.
- let failedLegacyUnackWritesAfter = getNotMasterLegacyUnackWritesCounter();
- assert.eq(failedLegacyUnackWritesAfter, failedLegacyUnackWritesBefore + 1);
-
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
- }
- runStepDownTest({
- description: "insert",
- failpoint: "hangDuringBatchInsert",
- operation: () => primaryColl.insert({_id: 'no_insert_on_stepdown'})
- });
-
- runStepDownTest({
- description: "update",
- failpoint: "hangDuringBatchUpdate",
- operation: () => primaryColl.update({_id: 'updateme'}, {'$set': {x: 1}})
- });
-
- runStepDownTest({
- description: "remove",
- failpoint: "hangDuringBatchRemove",
- operation: () => primaryColl.remove({_id: 'deleteme'}, {'$set': {x: 1}})
- });
-
- rst.stopSet();
+ assert.commandWorked(primaryDb.adminCommand({ping: 1}));
+
+    // Validate that the legacy unacknowledged write that failed due to the step down
+    // resulted in a network disconnection and was counted as such.
+ let failedLegacyUnackWritesAfter = getNotMasterLegacyUnackWritesCounter();
+ assert.eq(failedLegacyUnackWritesAfter, failedLegacyUnackWritesBefore + 1);
+
+ // Allow the primary to be re-elected, and wait for it.
+ assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+ rst.getPrimary();
+}
+runStepDownTest({
+ description: "insert",
+ failpoint: "hangDuringBatchInsert",
+ operation: () => primaryColl.insert({_id: 'no_insert_on_stepdown'})
+});
+
+runStepDownTest({
+ description: "update",
+ failpoint: "hangDuringBatchUpdate",
+ operation: () => primaryColl.update({_id: 'updateme'}, {'$set': {x: 1}})
+});
+
+runStepDownTest({
+ description: "remove",
+ failpoint: "hangDuringBatchRemove",
+    operation: () => primaryColl.remove({_id: 'deleteme'})
+});
+
+rst.stopSet();
})();
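
The hunk above reduces to a reusable pattern: sample a serverStatus counter, drive the operation while a failpoint holds it in place, then assert that the counter advanced. A minimal sketch of the counter-delta check in isolation, assuming `primary` is a connection obtained from rst.getPrimary(); the helper name getInsertedCount and the collection name counterCheck are illustrative, while metrics.document.inserted is a standard serverStatus counter:

    // Sketch: assert that an operation advances a serverStatus counter.
    function getInsertedCount(conn) {
        return assert.commandWorked(conn.adminCommand({serverStatus: 1}))
            .metrics.document.inserted;
    }

    const before = getInsertedCount(primary);
    assert.writeOK(primary.getDB("test").counterCheck.insert({x: 1}));
    // gte rather than eq: unrelated writes may also bump the counter.
    assert.gte(getInsertedCount(primary), before + 1);
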
diff --git a/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js b/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
index b5e67365eb4..35440fcb441 100644
--- a/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
+++ b/jstests/replsets/do_not_advance_commit_point_beyond_last_applied_term.js
@@ -5,110 +5,112 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+load("jstests/libs/check_log.js");
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- // Set up a ReplSetTest where nodes only sync one oplog entry at a time.
- const rst = new ReplSetTest(
- {nodes: 5, useBridge: true, nodeOptions: {setParameter: "bgSyncOplogFetcherBatchSize=1"}});
- rst.startSet();
- const config = rst.getReplSetConfig();
- // Prevent elections.
- config.settings = {electionTimeoutMillis: 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Set up a ReplSetTest where nodes only sync one oplog entry at a time.
+const rst = new ReplSetTest(
+ {nodes: 5, useBridge: true, nodeOptions: {setParameter: "bgSyncOplogFetcherBatchSize=1"}});
+rst.startSet();
+const config = rst.getReplSetConfig();
+// Prevent elections.
+config.settings = {
+ electionTimeoutMillis: 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- const nodeA = rst.nodes[0];
- const nodeB = rst.nodes[1];
- const nodeC = rst.nodes[2];
- const nodeD = rst.nodes[3];
- const nodeE = rst.nodes[4];
+const nodeA = rst.nodes[0];
+const nodeB = rst.nodes[1];
+const nodeC = rst.nodes[2];
+const nodeD = rst.nodes[3];
+const nodeE = rst.nodes[4];
- jsTest.log("Node A is primary in term 1. Node E is delayed.");
- // A: [1]
- // B: [1]
- // C: [1]
- // D: [1]
- // E:
- assert.eq(nodeA, rst.getPrimary());
- nodeE.disconnect([nodeA, nodeB, nodeC, nodeD]);
- assert.commandWorked(nodeA.getDB(dbName)[collName].insert({term: 1}));
- rst.awaitReplication(undefined, undefined, [nodeB, nodeC, nodeD]);
+jsTest.log("Node A is primary in term 1. Node E is delayed.");
+// A: [1]
+// B: [1]
+// C: [1]
+// D: [1]
+// E:
+assert.eq(nodeA, rst.getPrimary());
+nodeE.disconnect([nodeA, nodeB, nodeC, nodeD]);
+assert.commandWorked(nodeA.getDB(dbName)[collName].insert({term: 1}));
+rst.awaitReplication(undefined, undefined, [nodeB, nodeC, nodeD]);
- jsTest.log("Node B steps up in term 2 and performs a write, which is not replicated.");
- // A: [1]
- // B: [1] [2]
- // C: [1]
- // D: [1]
- // E:
- stopServerReplication([nodeA, nodeC, nodeD]);
- assert.commandWorked(nodeB.adminCommand({replSetStepUp: 1}));
- rst.waitForState(nodeA, ReplSetTest.State.SECONDARY);
- assert.eq(nodeB, rst.getPrimary());
- assert.commandWorked(nodeB.getDB(dbName)[collName].insert({term: 2}));
+jsTest.log("Node B steps up in term 2 and performs a write, which is not replicated.");
+// A: [1]
+// B: [1] [2]
+// C: [1]
+// D: [1]
+// E:
+stopServerReplication([nodeA, nodeC, nodeD]);
+assert.commandWorked(nodeB.adminCommand({replSetStepUp: 1}));
+rst.waitForState(nodeA, ReplSetTest.State.SECONDARY);
+assert.eq(nodeB, rst.getPrimary());
+assert.commandWorked(nodeB.getDB(dbName)[collName].insert({term: 2}));
- jsTest.log("Node A steps up again in term 3 with votes from A, C, and D and commits a write.");
- // A: [1] [3]
- // B: [1] [2]
- // C: [1] [3]
- // D: [1] [3]
- // E:
- nodeB.disconnect([nodeA, nodeC, nodeD, nodeE]);
- assert.commandWorked(nodeA.adminCommand({replSetStepUp: 1}));
- restartServerReplication([nodeA, nodeC, nodeD]);
- assert.soon(() => {
- // We cannot use getPrimary() here because 2 nodes report they are primary.
- return assert.commandWorked(nodeA.adminCommand({ismaster: 1})).ismaster;
- });
- assert.commandWorked(
- nodeA.getDB(dbName)[collName].insert({term: 3}, {writeConcern: {w: "majority"}}));
- assert.eq(1, nodeC.getDB(dbName)[collName].find({term: 3}).itcount());
- assert.eq(1, nodeD.getDB(dbName)[collName].find({term: 3}).itcount());
+jsTest.log("Node A steps up again in term 3 with votes from A, C, and D and commits a write.");
+// A: [1] [3]
+// B: [1] [2]
+// C: [1] [3]
+// D: [1] [3]
+// E:
+nodeB.disconnect([nodeA, nodeC, nodeD, nodeE]);
+assert.commandWorked(nodeA.adminCommand({replSetStepUp: 1}));
+restartServerReplication([nodeA, nodeC, nodeD]);
+assert.soon(() => {
+ // We cannot use getPrimary() here because 2 nodes report they are primary.
+ return assert.commandWorked(nodeA.adminCommand({ismaster: 1})).ismaster;
+});
+assert.commandWorked(
+ nodeA.getDB(dbName)[collName].insert({term: 3}, {writeConcern: {w: "majority"}}));
+assert.eq(1, nodeC.getDB(dbName)[collName].find({term: 3}).itcount());
+assert.eq(1, nodeD.getDB(dbName)[collName].find({term: 3}).itcount());
- jsTest.log("Node E syncs from a majority node and learns the new commit point in term 3.");
- // A: [1] [3]
- // B: [1] [2]
- // C: [1] [3]
- // D: [1] [3]
- // E: [1]
- // The stopReplProducerOnDocument failpoint ensures that Node E stops replicating before
- // applying the document {msg: "new primary"}, which is the first document of term 3. This
- // depends on the oplog fetcher batch size being 1.
- assert.commandWorked(nodeE.adminCommand({
- configureFailPoint: "stopReplProducerOnDocument",
- mode: "alwaysOn",
- data: {document: {msg: "new primary"}}
- }));
- nodeE.reconnect([nodeA, nodeC, nodeD]);
- checkLog.contains(nodeE, "stopReplProducerOnDocument fail point is enabled.");
- assert.soon(() => {
- return 1 === nodeE.getDB(dbName)[collName].find({term: 1}).itcount();
- });
- assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 3}).itcount());
+jsTest.log("Node E syncs from a majority node and learns the new commit point in term 3.");
+// A: [1] [3]
+// B: [1] [2]
+// C: [1] [3]
+// D: [1] [3]
+// E: [1]
+// The stopReplProducerOnDocument failpoint ensures that Node E stops replicating before
+// applying the document {msg: "new primary"}, which is the first document of term 3. This
+// depends on the oplog fetcher batch size being 1.
+assert.commandWorked(nodeE.adminCommand({
+ configureFailPoint: "stopReplProducerOnDocument",
+ mode: "alwaysOn",
+ data: {document: {msg: "new primary"}}
+}));
+nodeE.reconnect([nodeA, nodeC, nodeD]);
+checkLog.contains(nodeE, "stopReplProducerOnDocument fail point is enabled.");
+assert.soon(() => {
+ return 1 === nodeE.getDB(dbName)[collName].find({term: 1}).itcount();
+});
+assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 3}).itcount());
- jsTest.log("Node E switches its sync source to B and replicates the stale branch of term 2.");
- nodeE.disconnect([nodeA, nodeC, nodeD]);
- nodeB.reconnect(nodeE);
- assert.commandWorked(
- nodeE.adminCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));
- assert.soon(() => {
- return 1 === nodeE.getDB(dbName)[collName].find({term: 2}).itcount();
- });
+jsTest.log("Node E switches its sync source to B and replicates the stale branch of term 2.");
+nodeE.disconnect([nodeA, nodeC, nodeD]);
+nodeB.reconnect(nodeE);
+assert.commandWorked(
+ nodeE.adminCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));
+assert.soon(() => {
+ return 1 === nodeE.getDB(dbName)[collName].find({term: 2}).itcount();
+});
- jsTest.log("Node E must not return the entry in term 2 as committed.");
- assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 2}).readConcern("majority").itcount());
+jsTest.log("Node E must not return the entry in term 2 as committed.");
+assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 2}).readConcern("majority").itcount());
- jsTest.log("Reconnect the set. Node E must roll back successfully.");
- nodeE.reconnect([nodeA, nodeC, nodeD]);
- nodeB.reconnect([nodeA, nodeC, nodeD]);
- rst.awaitReplication();
- assert.eq(1, nodeE.getDB(dbName)[collName].find({term: 1}).itcount());
- assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 2}).itcount());
- assert.eq(1, nodeE.getDB(dbName)[collName].find({term: 3}).itcount());
+jsTest.log("Reconnect the set. Node E must roll back successfully.");
+nodeE.reconnect([nodeA, nodeC, nodeD]);
+nodeB.reconnect([nodeA, nodeC, nodeD]);
+rst.awaitReplication();
+assert.eq(1, nodeE.getDB(dbName)[collName].find({term: 1}).itcount());
+assert.eq(0, nodeE.getDB(dbName)[collName].find({term: 2}).itcount());
+assert.eq(1, nodeE.getDB(dbName)[collName].find({term: 3}).itcount());
- rst.stopSet();
+rst.stopSet();
}());
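
The failpoint handling here follows the fixed lifecycle used throughout these suites: enable with mode "alwaysOn" (optionally scoped by a data document), wait for the server to confirm it fired, exercise the behavior, then disable with mode "off". A sketch of that lifecycle on its own, assuming `node` is a mongod connection from a ReplSetTest and that jstests/libs/check_log.js has been loaded:

    // Enable the failpoint; the data document restricts it to one oplog entry.
    assert.commandWorked(node.adminCommand({
        configureFailPoint: "stopReplProducerOnDocument",
        mode: "alwaysOn",
        data: {document: {msg: "new primary"}}
    }));
    // Block until the server logs that the failpoint is active.
    checkLog.contains(node, "stopReplProducerOnDocument fail point is enabled.");

    // ... exercise the behavior under test ...

    // Always disable the failpoint so later test phases run normally.
    assert.commandWorked(
        node.adminCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));
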
diff --git a/jstests/replsets/drain.js b/jstests/replsets/drain.js
index 41e8d475f83..e1d008aebc0 100644
--- a/jstests/replsets/drain.js
+++ b/jstests/replsets/drain.js
@@ -10,101 +10,101 @@
// 8. Ensure the ops in queue are applied and that the PRIMARY begins to accept writes as usual.
(function() {
- "use strict";
- var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({
- "_id": "testSet",
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ],
- // No primary catch-up so we focus on the drain mode.
- "settings": {"catchUpTimeoutMillis": 0},
- });
+"use strict";
+var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate({
+ "_id": "testSet",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ],
+ // No primary catch-up so we focus on the drain mode.
+ "settings": {"catchUpTimeoutMillis": 0},
+});
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
- // Do an initial insert to prevent the secondary from going into recovery
- var numDocuments = 20;
- var bulk = primary.getDB("foo").foo.initializeUnorderedBulkOp();
- var bigString = Array(1024 * 1024).toString();
- assert.writeOK(primary.getDB("foo").foo.insert({big: bigString}));
- replSet.awaitReplication();
- assert.commandWorked(secondary.getDB("admin").runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
- 'failed to enable fail point on secondary');
+// Do an initial insert to prevent the secondary from going into recovery
+var numDocuments = 20;
+var bulk = primary.getDB("foo").foo.initializeUnorderedBulkOp();
+var bigString = Array(1024 * 1024).toString();
+assert.writeOK(primary.getDB("foo").foo.insert({big: bigString}));
+replSet.awaitReplication();
+assert.commandWorked(
+ secondary.getDB("admin").runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
+ 'failed to enable fail point on secondary');
- var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
- for (var i = 1; i < numDocuments; ++i) {
- bulk.insert({big: bigString});
- }
- assert.writeOK(bulk.execute());
- jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
- assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
+var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
+for (var i = 1; i < numDocuments; ++i) {
+ bulk.insert({big: bigString});
+}
+assert.writeOK(bulk.execute());
+jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
+assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
- assert.soon(function() {
- var serverStatus = secondary.getDB('foo').serverStatus();
- var bufferCount = serverStatus.metrics.repl.buffer.count;
- var bufferCountChange = bufferCount - bufferCountBefore;
- jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
- bufferCountChange);
- return bufferCountChange >= numDocuments - 1;
- }, 'secondary did not buffer operations for new inserts on primary', 300000, 1000);
+assert.soon(function() {
+ var serverStatus = secondary.getDB('foo').serverStatus();
+ var bufferCount = serverStatus.metrics.repl.buffer.count;
+ var bufferCountChange = bufferCount - bufferCountBefore;
+ jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
+ bufferCountChange);
+ return bufferCountChange >= numDocuments - 1;
+}, 'secondary did not buffer operations for new inserts on primary', 300000, 1000);
- // Kill primary; secondary will enter drain mode to catch up
- primary.getDB("admin").shutdownServer({force: true});
+// Kill primary; secondary will enter drain mode to catch up
+primary.getDB("admin").shutdownServer({force: true});
- replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
+replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
- // Ensure new primary is not yet writable
- jsTestLog('New primary should not be writable yet');
- assert.writeError(secondary.getDB("foo").flag.insert({sentinel: 2}));
- assert(!secondary.getDB("admin").runCommand({"isMaster": 1}).ismaster);
+// Ensure new primary is not yet writable
+jsTestLog('New primary should not be writable yet');
+assert.writeError(secondary.getDB("foo").flag.insert({sentinel: 2}));
+assert(!secondary.getDB("admin").runCommand({"isMaster": 1}).ismaster);
- // Ensure new primary is not yet readable without slaveOk bit.
- secondary.slaveOk = false;
- jsTestLog('New primary should not be readable yet, without slaveOk bit');
- var res = secondary.getDB("foo").runCommand({find: "foo"});
- assert.commandFailed(res);
- assert.eq(ErrorCodes.NotMasterNoSlaveOk,
- res.code,
- "find failed with unexpected error code: " + tojson(res));
- // Nor should it be readable with the slaveOk bit.
- secondary.slaveOk = true;
- assert.commandWorked(secondary.getDB("foo").runCommand({find: "foo"}));
+// Ensure new primary is not yet readable without slaveOk bit.
+secondary.slaveOk = false;
+jsTestLog('New primary should not be readable yet, without slaveOk bit');
+var res = secondary.getDB("foo").runCommand({find: "foo"});
+assert.commandFailed(res);
+assert.eq(ErrorCodes.NotMasterNoSlaveOk,
+ res.code,
+ "find failed with unexpected error code: " + tojson(res));
+// It should, however, be readable with the slaveOk bit.
+secondary.slaveOk = true;
+assert.commandWorked(secondary.getDB("foo").runCommand({find: "foo"}));
- assert.commandFailedWithCode(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 5000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
+assert.commandFailedWithCode(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 5000,
+ }),
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
- // Allow draining to complete
- jsTestLog('Disabling fail point on new primary to allow draining to complete');
- assert.commandWorked(
- secondary.getDB("admin").runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
- 'failed to disable fail point on new primary');
- primary = replSet.getPrimary();
+// Allow draining to complete
+jsTestLog('Disabling fail point on new primary to allow draining to complete');
+assert.commandWorked(
+ secondary.getDB("admin").runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
+ 'failed to disable fail point on new primary');
+primary = replSet.getPrimary();
- assert.commandWorked(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 30000,
- }),
- 'replSetTest waitForDrainFinish should work when draining is allowed to complete');
+assert.commandWorked(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 30000,
+ }),
+ 'replSetTest waitForDrainFinish should work when draining is allowed to complete');
- // Ensure new primary is writable
- jsTestLog('New primary should be writable after draining is complete');
- assert.writeOK(primary.getDB("foo").flag.insert({sentinel: 1}));
- // Check for at least two entries. There was one prior to freezing op application on the
- // secondary and we cannot guarantee all writes reached the secondary's op queue prior to
- // shutting down the original primary.
- assert.gte(primary.getDB("foo").foo.find().itcount(), 2);
- replSet.stopSet();
+// Ensure new primary is writable
+jsTestLog('New primary should be writable after draining is complete');
+assert.writeOK(primary.getDB("foo").flag.insert({sentinel: 1}));
+// Check for at least two entries. There was one prior to freezing op application on the
+// secondary and we cannot guarantee all writes reached the secondary's op queue prior to
+// shutting down the original primary.
+assert.gte(primary.getDB("foo").foo.find().itcount(), 2);
+replSet.stopSet();
})();
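
The buffer check in drain.js is an instance of the standard assert.soon polling loop: re-evaluate a predicate until it returns true or the timeout elapses. A sketch that instead waits for a secondary's apply queue to empty, assuming `secondary` is a live connection; the timeout and polling interval are illustrative:

    assert.soon(function() {
        const buffer = secondary.getDB('admin').serverStatus().metrics.repl.buffer;
        jsTestLog('Operations still buffered on secondary: ' + buffer.count);
        return buffer.count === 0;
    }, 'secondary never drained its apply queue', 5 * 60 * 1000, 1000);
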
diff --git a/jstests/replsets/drop_collections_two_phase.js b/jstests/replsets/drop_collections_two_phase.js
index 8b6b3bab79e..ac8b727834a 100644
--- a/jstests/replsets/drop_collections_two_phase.js
+++ b/jstests/replsets/drop_collections_two_phase.js
@@ -4,34 +4,34 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
+// COMMIT collection drop.
+twoPhaseDropTest.commitDropCollection(collName);
- twoPhaseDropTest.stop();
+twoPhaseDropTest.stop();
}());
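
While a drop sits in the prepare phase, the collection lives under a system.drop.* namespace that ordinary listCollections output hides; the TwoPhaseDropCollectionTest helpers query that state internally. A sketch of inspecting it directly, assuming `db` is the test database on the primary and that the server version honors the includePendingDrops option:

    const res = assert.commandWorked(
        db.runCommand({listCollections: 1, includePendingDrops: true}));
    const pendingDrops =
        res.cursor.firstBatch.filter(c => c.name.startsWith('system.drop.'));
    pendingDrops.forEach(c => jsTestLog('Drop-pending namespace: ' + c.name));
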
diff --git a/jstests/replsets/drop_collections_two_phase_apply_ops_convert_to_capped.js b/jstests/replsets/drop_collections_two_phase_apply_ops_convert_to_capped.js
index 23992c17a21..26a018a863f 100644
--- a/jstests/replsets/drop_collections_two_phase_apply_ops_convert_to_capped.js
+++ b/jstests/replsets/drop_collections_two_phase_apply_ops_convert_to_capped.js
@@ -3,98 +3,97 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_apply_ops_convert_to_capped";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_apply_ops_convert_to_capped";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
- try {
- // Converting a drop-pending collection to a capped collection returns NamespaceNotFound.
- const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
- const dropPendingCollName = dropPendingColl.name;
- const primary = replTest.getPrimary();
- const convertToCappedCmdWithName = {
- convertToCapped: dropPendingCollName,
- size: 100000,
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to convert collection with system.drop namespace: ' +
- tojson(convertToCappedCmdWithName));
- assert.commandFailedWithCode(primary.getDB(dbName).runCommand(convertToCappedCmdWithName),
- ErrorCodes.NamespaceNotFound);
- let dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
- assert(dropPendingCollInfo,
- 'convertToCapped using collection name ' + dropPendingCollName +
- ' affected drop-pending collection state unexpectedly');
- assert(!dropPendingCollInfo.options.capped);
- assert(!twoPhaseDropTest.collectionExists(collName));
+try {
+ // Converting a drop-pending collection to a capped collection returns NamespaceNotFound.
+ const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ const dropPendingCollName = dropPendingColl.name;
+ const primary = replTest.getPrimary();
+ const convertToCappedCmdWithName = {
+ convertToCapped: dropPendingCollName,
+ size: 100000,
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to convert collection with system.drop namespace: ' +
+ tojson(convertToCappedCmdWithName));
+ assert.commandFailedWithCode(primary.getDB(dbName).runCommand(convertToCappedCmdWithName),
+ ErrorCodes.NamespaceNotFound);
+ let dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ assert(dropPendingCollInfo,
+ 'convertToCapped using collection name ' + dropPendingCollName +
+ ' affected drop-pending collection state unexpectedly');
+ assert(!dropPendingCollInfo.options.capped);
+ assert(!twoPhaseDropTest.collectionExists(collName));
- // Converting a drop-pending collection to a capped collection using applyOps with
- // system.drop namespace.
- const cmdNs = dbName + '.$cmd';
- const applyOpsCmdWithName = {
- applyOps:
- [{op: 'c', ns: cmdNs, o: {convertToCapped: dropPendingCollName, size: 100000}}]
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to convert collection using applyOps with system.drop namespace: ' +
- tojson(applyOpsCmdWithName));
- // NamespaceNotFound is ignored, but the drop-pending collection shouldn't be affected.
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
- assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
- 'applyOps using collection name ' + dropPendingCollName +
- ' affected drop-pending collection state unexpectedly');
+ // Converting a drop-pending collection to a capped collection using applyOps with
+ // system.drop namespace.
+ const cmdNs = dbName + '.$cmd';
+ const applyOpsCmdWithName = {
+ applyOps: [{op: 'c', ns: cmdNs, o: {convertToCapped: dropPendingCollName, size: 100000}}]
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to convert collection using applyOps with system.drop namespace: ' +
+ tojson(applyOpsCmdWithName));
+ // NamespaceNotFound is ignored, but the drop-pending collection shouldn't be affected.
+ assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
+ assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
+ 'applyOps using collection name ' + dropPendingCollName +
+ ' affected drop-pending collection state unexpectedly');
- // Converting a drop-pending collection to a capped collection using applyOps with UUID.
- const dropPendingCollUuid = dropPendingColl.info.uuid;
- const applyOpsCmdWithUuid = {
- applyOps: [{
- op: 'c',
- ns: cmdNs,
- ui: dropPendingCollUuid,
- o: {convertToCapped: 'ignored_collection_name', size: 100000}
- }]
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to convert collection using applyOps with UUID: ' +
- tojson(applyOpsCmdWithUuid));
- // NamespaceNotFound is ignored, but the drop-pending collection shouldn't be affected.
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
- dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
- assert(dropPendingCollInfo,
- 'applyOps using UUID ' + dropPendingCollUuid +
- ' affected drop-pending collection state unexpectedly');
- assert(!dropPendingCollInfo.options.capped);
- assert.eq(dropPendingCollUuid.hex(),
- dropPendingCollInfo.info.uuid.hex(),
- 'drop pending collection UUID does not match UUID of original collection: ' +
- tojson(dropPendingCollInfo));
- assert(!twoPhaseDropTest.collectionExists(collName));
+ // Converting a drop-pending collection to a capped collection using applyOps with UUID.
+ const dropPendingCollUuid = dropPendingColl.info.uuid;
+ const applyOpsCmdWithUuid = {
+ applyOps: [{
+ op: 'c',
+ ns: cmdNs,
+ ui: dropPendingCollUuid,
+ o: {convertToCapped: 'ignored_collection_name', size: 100000}
+ }]
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to convert collection using applyOps with UUID: ' +
+ tojson(applyOpsCmdWithUuid));
+ // NamespaceNotFound is ignored, but the drop-pending collection shouldn't be affected.
+    assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
+ dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ assert(dropPendingCollInfo,
+ 'applyOps using UUID ' + dropPendingCollUuid +
+ ' affected drop-pending collection state unexpectedly');
+ assert(!dropPendingCollInfo.options.capped);
+ assert.eq(dropPendingCollUuid.hex(),
+ dropPendingCollInfo.info.uuid.hex(),
+ 'drop pending collection UUID does not match UUID of original collection: ' +
+ tojson(dropPendingCollInfo));
+ assert(!twoPhaseDropTest.collectionExists(collName));
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
- } finally {
- twoPhaseDropTest.stop();
- }
+ // COMMIT collection drop.
+ twoPhaseDropTest.commitDropCollection(collName);
+} finally {
+ twoPhaseDropTest.stop();
+}
}());
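
The 'ui' field in these applyOps entries addresses the target collection by UUID, and the collection name inside 'o' is then ignored. A sketch of building such an entry from listCollections output, assuming `primary` is the current primary and that test.coll exists; the drop op is only there to show the shape:

    const collInfo = primary.getDB('test').getCollectionInfos({name: 'coll'})[0];
    const collUuid = collInfo.info.uuid;  // a UUID() value
    assert.commandWorked(primary.adminCommand({
        applyOps: [
            // 'ui' resolves the target; the name in 'o' is ignored.
            {op: 'c', ns: 'test.$cmd', ui: collUuid, o: {drop: 'ignored_collection_name'}}
        ]
    }));
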
diff --git a/jstests/replsets/drop_collections_two_phase_apply_ops_create.js b/jstests/replsets/drop_collections_two_phase_apply_ops_create.js
index 792be894d8d..34998dce7ec 100644
--- a/jstests/replsets/drop_collections_two_phase_apply_ops_create.js
+++ b/jstests/replsets/drop_collections_two_phase_apply_ops_create.js
@@ -6,63 +6,58 @@
*/
(function() {
- "use strict";
-
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
-
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_apply_ops_create";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
-
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
-
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
-
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
-
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
-
- try {
- // Create collection using applyOps with UUID that belongs to a drop-pending collection.
- const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
- const dropPendingCollName = dropPendingColl.name;
- const primary = replTest.getPrimary();
- const cmdNs = dbName + '.$cmd';
- const dropPendingCollUuid = dropPendingColl.info.uuid;
- const applyOpsCmdWithUuid = {
- applyOps: [{
- op: 'c',
- ns: cmdNs,
- ui: dropPendingCollUuid,
- o: {create: 'ignored_collection_name'}
- }]
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to create collection using applyOps with UUID: ' +
- tojson(applyOpsCmdWithUuid));
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
- const dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
- assert(dropPendingCollInfo,
- 'applyOps using UUID ' + dropPendingCollUuid +
- ' changed drop-pending state on collection unexpectedly');
- assert.eq(dropPendingCollUuid.hex(),
- dropPendingCollInfo.info.uuid.hex(),
- 'drop pending collection UUID does not match UUID of original collection: ' +
- tojson(dropPendingCollInfo));
-
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
- } finally {
- twoPhaseDropTest.stop();
- }
+"use strict";
+
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_apply_ops_create";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
+
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
+
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
+
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
+
+try {
+ // Create collection using applyOps with UUID that belongs to a drop-pending collection.
+ const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ const dropPendingCollName = dropPendingColl.name;
+ const primary = replTest.getPrimary();
+ const cmdNs = dbName + '.$cmd';
+ const dropPendingCollUuid = dropPendingColl.info.uuid;
+ const applyOpsCmdWithUuid = {
+ applyOps:
+ [{op: 'c', ns: cmdNs, ui: dropPendingCollUuid, o: {create: 'ignored_collection_name'}}]
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to create collection using applyOps with UUID: ' + tojson(applyOpsCmdWithUuid));
+ assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
+ const dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ assert(dropPendingCollInfo,
+ 'applyOps using UUID ' + dropPendingCollUuid +
+ ' changed drop-pending state on collection unexpectedly');
+ assert.eq(dropPendingCollUuid.hex(),
+ dropPendingCollInfo.info.uuid.hex(),
+ 'drop pending collection UUID does not match UUID of original collection: ' +
+ tojson(dropPendingCollInfo));
+
+ // COMMIT collection drop.
+ twoPhaseDropTest.commitDropCollection(collName);
+} finally {
+ twoPhaseDropTest.stop();
+}
}());
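
Each of these applyOps variants wraps its assertions in try/finally so the replica set is torn down even when an assertion throws mid-test. The skeleton, with the body elided and testName/dbName assumed to be defined as in the tests above:

    const twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
    twoPhaseDropTest.initReplSet();
    try {
        // ... assertions that may throw ...
    } finally {
        // Runs on success and failure alike, so no mongod process is leaked.
        twoPhaseDropTest.stop();
    }
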
diff --git a/jstests/replsets/drop_collections_two_phase_apply_ops_drop.js b/jstests/replsets/drop_collections_two_phase_apply_ops_drop.js
index 8cfbca31166..0d83fc8602b 100644
--- a/jstests/replsets/drop_collections_two_phase_apply_ops_drop.js
+++ b/jstests/replsets/drop_collections_two_phase_apply_ops_drop.js
@@ -4,61 +4,62 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_apply_ops_noop";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_apply_ops_drop";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
- // Drop drop-pending collection using applyOps with system.drop namespace.
- const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
- const dropPendingCollName = dropPendingColl.name;
- const primary = replTest.getPrimary();
- const cmdNs = dbName + '.$cmd';
- const applyOpsCmdWithName = {applyOps: [{op: 'c', ns: cmdNs, o: {drop: dropPendingCollName}}]};
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to drop collection using applyOps with system.drop namespace: ' +
- tojson(applyOpsCmdWithName));
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
- assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
- 'applyOps using collection name ' + dropPendingCollName +
- ' removed drop-pending collection unexpectedly');
+// Drop drop-pending collection using applyOps with system.drop namespace.
+const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
+const dropPendingCollName = dropPendingColl.name;
+const primary = replTest.getPrimary();
+const cmdNs = dbName + '.$cmd';
+const applyOpsCmdWithName = {
+ applyOps: [{op: 'c', ns: cmdNs, o: {drop: dropPendingCollName}}]
+};
+TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to drop collection using applyOps with system.drop namespace: ' +
+ tojson(applyOpsCmdWithName));
+assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
+assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
+ 'applyOps using collection name ' + dropPendingCollName +
+ ' removed drop-pending collection unexpectedly');
- // Drop drop-pending collection using applyOps with UUID.
- const dropPendingCollUuid = dropPendingColl.info.uuid;
- const applyOpsCmdWithUuid = {
- applyOps:
- [{op: 'c', ns: cmdNs, ui: dropPendingCollUuid, o: {drop: 'ignored_collection_name'}}]
- };
- TwoPhaseDropCollectionTest._testLog('Attempting to drop collection using applyOps with UUID: ' +
- tojson(applyOpsCmdWithUuid));
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
- assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
- 'applyOps using UUID ' + dropPendingCollUuid +
- ' removed drop-pending collection unexpectedly');
+// Drop drop-pending collection using applyOps with UUID.
+const dropPendingCollUuid = dropPendingColl.info.uuid;
+const applyOpsCmdWithUuid = {
+ applyOps: [{op: 'c', ns: cmdNs, ui: dropPendingCollUuid, o: {drop: 'ignored_collection_name'}}]
+};
+TwoPhaseDropCollectionTest._testLog('Attempting to drop collection using applyOps with UUID: ' +
+ tojson(applyOpsCmdWithUuid));
+assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
+assert(
+ twoPhaseDropTest.collectionIsPendingDrop(collName),
+ 'applyOps using UUID ' + dropPendingCollUuid + ' removed drop-pending collection unexpectedly');
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
+// COMMIT collection drop.
+twoPhaseDropTest.commitDropCollection(collName);
- twoPhaseDropTest.stop();
+twoPhaseDropTest.stop();
}());
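
A recurring assertion style in these files is assert.commandFailedWithCode, which verifies both that a command failed and that it failed for the expected reason rather than some unrelated one. A sketch, assuming `db` is a database on the primary and that no collection named 'missing' exists:

    assert.commandFailedWithCode(db.runCommand({convertToCapped: 'missing', size: 100000}),
                                 ErrorCodes.NamespaceNotFound);
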
diff --git a/jstests/replsets/drop_collections_two_phase_apply_ops_rename.js b/jstests/replsets/drop_collections_two_phase_apply_ops_rename.js
index 8db6ffaf42e..7a957df0269 100644
--- a/jstests/replsets/drop_collections_two_phase_apply_ops_rename.js
+++ b/jstests/replsets/drop_collections_two_phase_apply_ops_rename.js
@@ -4,78 +4,77 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_apply_ops_rename";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_apply_ops_rename";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
- try {
- // Rename drop-pending collection using applyOps with system.drop namespace.
- const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
- const dropPendingCollName = dropPendingColl.name;
- const primary = replTest.getPrimary();
- const cmdNs = dbName + '.$cmd';
- const sourceNs = dbName + '.' + dropPendingCollName;
- const destNs = dbName + '.bar';
- const applyOpsCmdWithName = {
- applyOps: [{op: 'c', ns: cmdNs, o: {renameCollection: sourceNs, to: destNs}}]
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to rename collection using applyOps with system.drop namespace: ' +
- tojson(applyOpsCmdWithName));
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
- assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
- 'applyOps using collection name ' + dropPendingCollName +
- ' renamed drop-pending collection unexpectedly');
- assert(!twoPhaseDropTest.collectionExists(collName));
+try {
+ // Rename drop-pending collection using applyOps with system.drop namespace.
+ const dropPendingColl = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ const dropPendingCollName = dropPendingColl.name;
+ const primary = replTest.getPrimary();
+ const cmdNs = dbName + '.$cmd';
+ const sourceNs = dbName + '.' + dropPendingCollName;
+ const destNs = dbName + '.bar';
+ const applyOpsCmdWithName = {
+ applyOps: [{op: 'c', ns: cmdNs, o: {renameCollection: sourceNs, to: destNs}}]
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to rename collection using applyOps with system.drop namespace: ' +
+ tojson(applyOpsCmdWithName));
+ assert.commandWorked(primary.adminCommand(applyOpsCmdWithName));
+ assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
+ 'applyOps using collection name ' + dropPendingCollName +
+ ' renamed drop-pending collection unexpectedly');
+ assert(!twoPhaseDropTest.collectionExists(collName));
- // Rename drop-pending collection using applyOps with UUID.
- const dropPendingCollUuid = dropPendingColl.info.uuid;
- const applyOpsCmdWithUuid = {
- applyOps: [{
- op: 'c',
- ns: cmdNs,
- ui: dropPendingCollUuid,
- o: {renameCollection: dbName + '.ignored_collection_name', to: destNs}
- }]
- };
- TwoPhaseDropCollectionTest._testLog(
- 'Attempting to rename collection using applyOps with UUID: ' +
- tojson(applyOpsCmdWithUuid));
- assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
- const dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
- assert(dropPendingCollInfo,
- 'applyOps using UUID ' + dropPendingCollUuid +
- ' renamed drop-pending collection unexpectedly');
- assert.eq(dropPendingCollUuid.hex(),
- dropPendingCollInfo.info.uuid.hex(),
- 'drop pending collection UUID does not match UUID of original collection: ' +
- tojson(dropPendingCollInfo));
+ // Rename drop-pending collection using applyOps with UUID.
+ const dropPendingCollUuid = dropPendingColl.info.uuid;
+ const applyOpsCmdWithUuid = {
+ applyOps: [{
+ op: 'c',
+ ns: cmdNs,
+ ui: dropPendingCollUuid,
+ o: {renameCollection: dbName + '.ignored_collection_name', to: destNs}
+ }]
+ };
+ TwoPhaseDropCollectionTest._testLog(
+ 'Attempting to rename collection using applyOps with UUID: ' + tojson(applyOpsCmdWithUuid));
+ assert.commandWorked(primary.adminCommand(applyOpsCmdWithUuid));
+ const dropPendingCollInfo = twoPhaseDropTest.collectionIsPendingDrop(collName);
+ assert(dropPendingCollInfo,
+ 'applyOps using UUID ' + dropPendingCollUuid +
+ ' renamed drop-pending collection unexpectedly');
+ assert.eq(dropPendingCollUuid.hex(),
+ dropPendingCollInfo.info.uuid.hex(),
+ 'drop pending collection UUID does not match UUID of original collection: ' +
+ tojson(dropPendingCollInfo));
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
- } finally {
- twoPhaseDropTest.stop();
- }
+ // COMMIT collection drop.
+ twoPhaseDropTest.commitDropCollection(collName);
+} finally {
+ twoPhaseDropTest.stop();
+}
}());
diff --git a/jstests/replsets/drop_collections_two_phase_dbhash.js b/jstests/replsets/drop_collections_two_phase_dbhash.js
index b04b631bc09..058a6f09aeb 100644
--- a/jstests/replsets/drop_collections_two_phase_dbhash.js
+++ b/jstests/replsets/drop_collections_two_phase_dbhash.js
@@ -4,51 +4,49 @@
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Compute db hash for all collections on given database.
- function getDbHash(database) {
- let res =
- assert.commandWorked(database.runCommand({dbhash: 1}), "'dbHash' command failed.");
- return res.md5;
- }
+// Compute db hash for all collections on given database.
+function getDbHash(database) {
+ let res = assert.commandWorked(database.runCommand({dbhash: 1}), "'dbHash' command failed.");
+ return res.md5;
+}
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_long_index_names";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_dbhash";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- let primaryDB = replTest.getPrimary().getDB(dbName);
+let primaryDB = replTest.getPrimary().getDB(dbName);
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // Save the dbHash while drop is in 'pending' state.
- twoPhaseDropTest.prepareDropCollection(collName);
- let dropPendingDbHash = getDbHash(primaryDB);
+// Save the dbHash while drop is in 'pending' state.
+twoPhaseDropTest.prepareDropCollection(collName);
+let dropPendingDbHash = getDbHash(primaryDB);
- // Save the dbHash after the drop has been committed.
- twoPhaseDropTest.commitDropCollection(collName);
- let dropCommittedDbHash = getDbHash(primaryDB);
+// Save the dbHash after the drop has been committed.
+twoPhaseDropTest.commitDropCollection(collName);
+let dropCommittedDbHash = getDbHash(primaryDB);
- // The dbHash calculation should ignore drop pending collections. Therefore, therefore, the hash
- // during prepare phase and commit phase should match.
- let failMsg = "dbHash during drop pending phase did not match dbHash after drop was committed.";
- assert.eq(dropPendingDbHash, dropCommittedDbHash, failMsg);
-
- replTest.stopSet();
+// The dbHash calculation should ignore drop-pending collections. Therefore, the hash
+// during the prepare phase and the commit phase should match.
+let failMsg = "dbHash during drop pending phase did not match dbHash after drop was committed.";
+assert.eq(dropPendingDbHash, dropCommittedDbHash, failMsg);
+replTest.stopSet();
})(); \ No newline at end of file
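
Beyond comparing hashes across the two drop phases, dbHash is the standard way to check that two nodes hold identical data. A sketch comparing primary against secondary, assuming `replTest` is an initiated ReplSetTest and dbName is defined as above:

    replTest.awaitReplication();
    const primaryHash = assert.commandWorked(
        replTest.getPrimary().getDB(dbName).runCommand({dbHash: 1})).md5;
    const secondaryHash = assert.commandWorked(
        replTest.getSecondary().getDB(dbName).runCommand({dbHash: 1})).md5;
    assert.eq(primaryHash, secondaryHash, 'primary and secondary dbhashes diverged');
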
diff --git a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
index ad180601c2d..d7b16cdc790 100644
--- a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
+++ b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
@@ -4,116 +4,116 @@
*/
(function() {
- 'use strict';
-
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
- load('jstests/libs/check_log.js'); // For checkLog.contains().
-
- // Return a list of all indexes for a given collection. Use 'args' as the
- // 'listIndexes' command arguments.
- // Assumes all indexes in the collection fit in the first batch of results.
- function listIndexes(database, coll, args) {
- args = args || {};
- let failMsg = "'listIndexes' command failed";
- let listIndexesCmd = {listIndexes: coll};
- let res = assert.commandWorked(database.runCommand(listIndexesCmd, args), failMsg);
- return res.cursor.firstBatch;
- }
-
- // Set up a two phase drop test.
- let testName = 'drop_collection_two_phase_rename_drop_target';
- let dbName = testName;
- let fromCollName = 'collToRename';
- let toCollName = 'collToDrop';
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
-
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
-
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
-
- // Create the collections that will be renamed and dropped.
- twoPhaseDropTest.createCollection(fromCollName);
- twoPhaseDropTest.createCollection(toCollName);
-
- // Collection renames with dropTarget set to true should handle long index names in the target
- // collection gracefully. MMAPv1 imposes a hard limit on index namespaces so we have to drop
- // indexes that are too long to store on disk after renaming the collection.
- const primary = replTest.getPrimary();
- const testDb = primary.getDB(dbName);
- const fromColl = testDb.getCollection(fromCollName);
- const toColl = testDb.getCollection(toCollName);
- let longIndexName = 'a'.repeat(8192);
- let shortIndexName = "short_name";
-
- // In the target collection, which will be dropped, create one index with a "too long" name, and
- // one with a name of acceptable size.
- assert.commandWorked(toColl.ensureIndex({a: 1}, {name: longIndexName}));
- assert.commandWorked(toColl.ensureIndex({b: 1}, {name: shortIndexName}));
-
- // Insert documents into both collections so that we can tell them apart.
- assert.writeOK(fromColl.insert({_id: 'from'}));
- assert.writeOK(toColl.insert({_id: 'to'}));
+'use strict';
+
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+load('jstests/libs/check_log.js'); // For checkLog.contains().
+
+// Return a list of all indexes for a given collection. Use 'args' as the
+// 'listIndexes' command arguments.
+// Assumes all indexes in the collection fit in the first batch of results.
+function listIndexes(database, coll, args) {
+ args = args || {};
+ let failMsg = "'listIndexes' command failed";
+ let listIndexesCmd = {listIndexes: coll};
+ let res = assert.commandWorked(database.runCommand(listIndexesCmd, args), failMsg);
+ return res.cursor.firstBatch;
+}
+
+// Set up a two phase drop test.
+let testName = 'drop_collection_two_phase_rename_drop_target';
+let dbName = testName;
+let fromCollName = 'collToRename';
+let toCollName = 'collToDrop';
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
+
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
+
+// Create the collections that will be renamed and dropped.
+twoPhaseDropTest.createCollection(fromCollName);
+twoPhaseDropTest.createCollection(toCollName);
+
+// Collection renames with dropTarget set to true should handle long index names in the target
+// collection gracefully. MMAPv1 imposes a hard limit on index namespaces so we have to drop
+// indexes that are too long to store on disk after renaming the collection.
+const primary = replTest.getPrimary();
+const testDb = primary.getDB(dbName);
+const fromColl = testDb.getCollection(fromCollName);
+const toColl = testDb.getCollection(toCollName);
+let longIndexName = 'a'.repeat(8192);
+let shortIndexName = "short_name";
+
+// In the target collection, which will be dropped, create one index with a "too long" name, and
+// one with a name of acceptable size.
+assert.commandWorked(toColl.ensureIndex({a: 1}, {name: longIndexName}));
+assert.commandWorked(toColl.ensureIndex({b: 1}, {name: shortIndexName}));
+
+// Insert documents into both collections so that we can tell them apart.
+assert.writeOK(fromColl.insert({_id: 'from'}));
+assert.writeOK(toColl.insert({_id: 'to'}));
+replTest.awaitReplication();
+
+// Prevent renameCollection from being applied on the secondary so that we can examine the state
+// of the primary after target collection has been dropped.
+jsTestLog('Pausing oplog application on the secondary node.');
+const secondary = replTest.getSecondary();
+twoPhaseDropTest.pauseOplogApplication(secondary);
+
+// This logs each operation being applied.
+const previousLogLevel =
+    assert.commandWorked(primary.setLogLevel(1, 'storage')).was.storage.verbosity;
+
+try {
+ // When the target collection exists, the renameCollection command should fail if dropTarget
+ // flag is set to false or is omitted.
+ jsTestLog(
+ 'Checking renameCollection error handling when dropTarget is set to false and target collection exists.');
+ let dropTarget = false;
+ assert.commandFailedWithCode(fromColl.renameCollection(toCollName, dropTarget),
+ ErrorCodes.NamespaceExists);
+
+ // Rename collection with dropTarget set to true. Check collection contents after rename.
+ jsTestLog('Renaming collection ' + fromColl.getFullName() + ' to ' + toColl.getFullName() +
+ ' with dropTarget set to true.');
+ dropTarget = true;
+ assert.commandWorked(fromColl.renameCollection(toColl.getName(), dropTarget));
+ assert(!twoPhaseDropTest.collectionExists(fromCollName));
+ assert(twoPhaseDropTest.collectionExists(toCollName));
+ assert.eq({_id: 'from'}, toColl.findOne());
+
+ // Confirm that original target collection is now a drop-pending collection.
+ const isPendingDropResult = twoPhaseDropTest.collectionIsPendingDrop(toCollName);
+ assert(isPendingDropResult);
+ const droppedCollName = isPendingDropResult.name;
+ jsTestLog('Original target collection is now in a drop-pending state: ' + droppedCollName);
+
+ // COMMIT collection drop.
+ twoPhaseDropTest.resumeOplogApplication(secondary);
replTest.awaitReplication();
+ assert.soonNoExcept(function() {
+ return !twoPhaseDropTest.collectionIsPendingDrop(toCollName);
+ });
- // Prevent renameCollection from being applied on the secondary so that we can examine the state
- // of the primary after target collection has been dropped.
- jsTestLog('Pausing oplog application on the secondary node.');
- const secondary = replTest.getSecondary();
- twoPhaseDropTest.pauseOplogApplication(secondary);
-
- // This logs each operation being applied.
- const previousLogLevel =
- assert.commandWorked(primary.setLogLevel(1, 'storage')).was.replication.verbosity;
-
- try {
- // When the target collection exists, the renameCollection command should fail if dropTarget
- // flag is set to false or is omitted.
- jsTestLog(
- 'Checking renameCollection error handling when dropTarget is set to false and target collection exists.');
- let dropTarget = false;
- assert.commandFailedWithCode(fromColl.renameCollection(toCollName, dropTarget),
- ErrorCodes.NamespaceExists);
-
- // Rename collection with dropTarget set to true. Check collection contents after rename.
- jsTestLog('Renaming collection ' + fromColl.getFullName() + ' to ' + toColl.getFullName() +
- ' with dropTarget set to true.');
- dropTarget = true;
- assert.commandWorked(fromColl.renameCollection(toColl.getName(), dropTarget));
- assert(!twoPhaseDropTest.collectionExists(fromCollName));
- assert(twoPhaseDropTest.collectionExists(toCollName));
- assert.eq({_id: 'from'}, toColl.findOne());
-
- // Confirm that original target collection is now a drop-pending collection.
- const isPendingDropResult = twoPhaseDropTest.collectionIsPendingDrop(toCollName);
- assert(isPendingDropResult);
- const droppedCollName = isPendingDropResult.name;
- jsTestLog('Original target collection is now in a drop-pending state: ' + droppedCollName);
-
- // COMMIT collection drop.
- twoPhaseDropTest.resumeOplogApplication(secondary);
- replTest.awaitReplication();
- assert.soonNoExcept(function() {
- return !twoPhaseDropTest.collectionIsPendingDrop(toCollName);
- });
-
- // Confirm in the logs that the renameCollection dropped the target collection on the
- // secondary using two phase collection drop.
- checkLog.contains(secondary, 'dropCollection: ' + toColl.getFullName());
-
- // Rename target collection back to source collection. This helps to ensure the collection
- // metadata is updated correctly on both primary and secondary.
- assert.commandWorked(toColl.renameCollection(fromCollName + '_roundtrip'));
- replTest.awaitReplication();
- } finally {
- // Reset log level.
- primary.setLogLevel(previousLogLevel, 'storage');
-
- twoPhaseDropTest.stop();
- }
+ // Confirm in the logs that the renameCollection dropped the target collection on the
+ // secondary using two phase collection drop.
+ checkLog.contains(secondary, 'dropCollection: ' + toColl.getFullName());
+
+ // Rename target collection back to source collection. This helps to ensure the collection
+ // metadata is updated correctly on both primary and secondary.
+ assert.commandWorked(toColl.renameCollection(fromCollName + '_roundtrip'));
+ replTest.awaitReplication();
+} finally {
+ // Reset log level.
+ primary.setLogLevel(previousLogLevel, 'storage');
+
+ twoPhaseDropTest.stop();
+}
}());
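
The dropTarget flag is what turns a rename onto an existing namespace from a NamespaceExists failure into an implicit (two-phase, on supporting engines) drop of the target. A sketch of the two modes back to back, assuming collections test.from and test.to both exist on the primary:

    const testDb = primary.getDB('test');
    // Target exists and dropTarget is false: the rename must fail.
    assert.commandFailedWithCode(testDb.from.renameCollection('to', false /* dropTarget */),
                                 ErrorCodes.NamespaceExists);
    // With dropTarget set to true, the target is dropped and the rename succeeds.
    assert.commandWorked(testDb.from.renameCollection('to', true /* dropTarget */));
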
diff --git a/jstests/replsets/drop_collections_two_phase_step_down.js b/jstests/replsets/drop_collections_two_phase_step_down.js
index 5d67d60e73a..849a1c82e29 100644
--- a/jstests/replsets/drop_collections_two_phase_step_down.js
+++ b/jstests/replsets/drop_collections_two_phase_step_down.js
@@ -13,58 +13,58 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- // Set up a two phase drop test.
- let testName = "drop_collection_two_phase_step_down";
- let dbName = testName;
- let collName = "collToDrop";
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+// Set up a two phase drop test.
+let testName = "drop_collection_two_phase_step_down";
+let dbName = testName;
+let collName = "collToDrop";
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ twoPhaseDropTest.stop();
+ return;
+}
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
- // Step primary down using {force: true} and wait for the same node to become primary again.
- // We use {force: true} because the current secondary has oplog application disabled and will
- // not be able to take over as primary.
- try {
- const primary = replTest.getPrimary();
- const primaryId = replTest.getNodeId(primary);
+// Step primary down using {force: true} and wait for the same node to become primary again.
+// We use {force: true} because the current secondary has oplog application disabled and will
+// not be able to take over as primary.
+try {
+ const primary = replTest.getPrimary();
+ const primaryId = replTest.getNodeId(primary);
- // Force step down primary.
- jsTestLog('Stepping down primary ' + primary.host + ' with {force: true}.');
- // The amount of time the node has to wait before becoming primary again.
- const stepDownSecs = 1;
- assert.commandWorked(primary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
+ // Force step down primary.
+ jsTestLog('Stepping down primary ' + primary.host + ' with {force: true}.');
+ // The amount of time the node has to wait before becoming primary again.
+ const stepDownSecs = 1;
+ assert.commandWorked(primary.adminCommand({replSetStepDown: stepDownSecs, force: true}));
- // Wait for the node that stepped down to regain PRIMARY status.
- jsTestLog('Waiting for node ' + primary.host + ' to become primary again');
- assert.eq(replTest.nodes[primaryId], replTest.getPrimary());
+ // Wait for the node that stepped down to regain PRIMARY status.
+ jsTestLog('Waiting for node ' + primary.host + ' to become primary again');
+ assert.eq(replTest.nodes[primaryId], replTest.getPrimary());
- jsTestLog('Node ' + primary.host + ' is now PRIMARY again. Checking if drop-pending' +
- ' collection still exists.');
- assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
- 'After stepping down and back up again, the primary ' + primary.host +
- ' removed drop-pending collection unexpectedly');
+ jsTestLog('Node ' + primary.host + ' is now PRIMARY again. Checking if drop-pending' +
+ ' collection still exists.');
+ assert(twoPhaseDropTest.collectionIsPendingDrop(collName),
+ 'After stepping down and back up again, the primary ' + primary.host +
+ ' removed drop-pending collection unexpectedly');
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
- } finally {
- twoPhaseDropTest.stop();
- }
+ // COMMIT collection drop.
+ twoPhaseDropTest.commitDropCollection(collName);
+} finally {
+ twoPhaseDropTest.stop();
+}
}());
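The {force: true} in the test above is what lets the stepdown succeed while the secondary has oplog application paused. For contrast, a hedged sketch of the non-forced variant, reusing the test's replTest (the one-second catch-up window is illustrative):

// Sketch: a non-forced stepdown fails when no electable secondary can
// catch up within secondaryCatchUpPeriodSecs.
const res = replTest.getPrimary().adminCommand(
    {replSetStepDown: 1, secondaryCatchUpPeriodSecs: 1});
assert.commandFailed(res);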
diff --git a/jstests/replsets/drop_collections_two_phase_write_concern.js b/jstests/replsets/drop_collections_two_phase_write_concern.js
index 27b133cb915..e7b60eb18fb 100644
--- a/jstests/replsets/drop_collections_two_phase_write_concern.js
+++ b/jstests/replsets/drop_collections_two_phase_write_concern.js
@@ -4,81 +4,87 @@
*/
(function() {
- 'use strict';
-
- load('jstests/libs/check_log.js');
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
-
- // Alias to logging function in two_phase_drops.js
- const testLog = TwoPhaseDropCollectionTest._testLog;
-
- /**
- * Ensures that the operation fails with a write concern timeout.
- */
- function assertTimeout(result) {
- assert.writeErrorWithCode(result, ErrorCodes.WriteConcernFailed);
- assert(result.hasWriteConcernError(), tojson(result));
- assert(result.getWriteConcernError().errInfo.wtimeout, tojson(result));
- }
-
- // Set up a two phase drop test.
- let testName = 'drop_collection_two_phase_write_concern';
- let dbName = testName;
- let collName = 'collToDrop';
- let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
-
- // Initialize replica set.
- let replTest = twoPhaseDropTest.initReplSet();
-
- // Check for 'system.drop' two phase drop support.
- if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- twoPhaseDropTest.stop();
- return;
- }
-
- // Create the collection that will be dropped.
- twoPhaseDropTest.createCollection(collName);
-
- const primary = replTest.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const collForInserts = primaryDB.getCollection('collForInserts');
- const writeConcernForSuccessfulOp = {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS};
- assert.writeOK(collForInserts.insert({_id: 0}, {writeConcern: writeConcernForSuccessfulOp}));
-
- // PREPARE collection drop.
- twoPhaseDropTest.prepareDropCollection(collName);
-
- const writeConcernForTimedOutOp = {w: 'majority', wtimeout: 10000};
- assertTimeout(collForInserts.insert({_id: 1}, {writeConcern: writeConcernForTimedOutOp}));
-
- // Prevent drop collection reaper from making progress after resuming oplog application.
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: 'dropPendingCollectionReaperHang', mode: 'alwaysOn'}));
-
- try {
- // Ensure that drop pending collection is not removed after resuming oplog application.
- testLog('Restarting oplog application on the secondary node.');
- twoPhaseDropTest.resumeOplogApplication(twoPhaseDropTest.replTest.getSecondary());
-
- // Ensure that we've hit the failpoint before moving on.
- checkLog.contains(primary, 'fail point dropPendingCollectionReaperHang enabled');
-
- // While the drop pending collection reaper is blocked, an operation waiting on a majority
- // write concern should time out.
- assertTimeout(collForInserts.insert({_id: 2}, {writeConcern: writeConcernForTimedOutOp}));
- } finally {
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: 'dropPendingCollectionReaperHang', mode: 'off'}));
- }
-
- // After the reaper is unblocked, an operation waiting on a majority write concern should
- // complete successfully.
- assert.writeOK(collForInserts.insert({_id: 3}, {writeConcern: writeConcernForSuccessfulOp}));
- assert.eq(4, collForInserts.find().itcount());
-
- // COMMIT collection drop.
- twoPhaseDropTest.commitDropCollection(collName);
+'use strict';
+load('jstests/libs/check_log.js');
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+
+// Alias to logging function in two_phase_drops.js
+const testLog = TwoPhaseDropCollectionTest._testLog;
+
+/**
+ * Ensures that the operation fails with a write concern timeout.
+ */
+function assertTimeout(result) {
+ assert.writeErrorWithCode(result, ErrorCodes.WriteConcernFailed);
+ assert(result.hasWriteConcernError(), tojson(result));
+ assert(result.getWriteConcernError().errInfo.wtimeout, tojson(result));
+}
+
+// Set up a two phase drop test.
+let testName = 'drop_collection_two_phase_write_concern';
+let dbName = testName;
+let collName = 'collToDrop';
+let twoPhaseDropTest = new TwoPhaseDropCollectionTest(testName, dbName);
+
+// Initialize replica set.
+let replTest = twoPhaseDropTest.initReplSet();
+
+// Check for 'system.drop' two phase drop support.
+if (!twoPhaseDropTest.supportsDropPendingNamespaces()) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
twoPhaseDropTest.stop();
+ return;
+}
+
+// Create the collection that will be dropped.
+twoPhaseDropTest.createCollection(collName);
+
+const primary = replTest.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const collForInserts = primaryDB.getCollection('collForInserts');
+const writeConcernForSuccessfulOp = {
+ w: 'majority',
+ wtimeout: replTest.kDefaultTimeoutMS
+};
+assert.writeOK(collForInserts.insert({_id: 0}, {writeConcern: writeConcernForSuccessfulOp}));
+
+// PREPARE collection drop.
+twoPhaseDropTest.prepareDropCollection(collName);
+
+const writeConcernForTimedOutOp = {
+ w: 'majority',
+ wtimeout: 10000
+};
+assertTimeout(collForInserts.insert({_id: 1}, {writeConcern: writeConcernForTimedOutOp}));
+
+// Prevent drop collection reaper from making progress after resuming oplog application.
+assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: 'dropPendingCollectionReaperHang', mode: 'alwaysOn'}));
+
+try {
+ // Ensure that drop pending collection is not removed after resuming oplog application.
+ testLog('Restarting oplog application on the secondary node.');
+ twoPhaseDropTest.resumeOplogApplication(twoPhaseDropTest.replTest.getSecondary());
+
+ // Ensure that we've hit the failpoint before moving on.
+ checkLog.contains(primary, 'fail point dropPendingCollectionReaperHang enabled');
+
+ // While the drop pending collection reaper is blocked, an operation waiting on a majority
+ // write concern should time out.
+ assertTimeout(collForInserts.insert({_id: 2}, {writeConcern: writeConcernForTimedOutOp}));
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'dropPendingCollectionReaperHang', mode: 'off'}));
+}
+
+// After the reaper is unblocked, an operation waiting on a majority write concern should
+// complete successfully.
+assert.writeOK(collForInserts.insert({_id: 3}, {writeConcern: writeConcernForSuccessfulOp}));
+assert.eq(4, collForInserts.find().itcount());
+
+// COMMIT collection drop.
+twoPhaseDropTest.commitDropCollection(collName);
+
+twoPhaseDropTest.stop();
}());
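For reference, assertTimeout() in the test above accepts a write result whose writeConcernError reports a wtimeout. The reply shape it checks looks roughly like this (field values are illustrative):

// {
//     ok: 1,
//     n: 1,
//     writeConcernError: {
//         code: 64,  // ErrorCodes.WriteConcernFailed
//         codeName: 'WriteConcernFailed',
//         errInfo: {wtimeout: true},
//         errmsg: 'waiting for replication timed out'
//     }
// }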
diff --git a/jstests/replsets/drop_databases_two_phase.js b/jstests/replsets/drop_databases_two_phase.js
index 065da9b66e9..5a00ebe2e9e 100644
--- a/jstests/replsets/drop_databases_two_phase.js
+++ b/jstests/replsets/drop_databases_two_phase.js
@@ -15,166 +15,164 @@
*/
(function() {
- "use strict";
-
- load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
- load('jstests/libs/check_log.js');
-
- // Returns a list of all collections in a given database. Use 'args' as the
- // 'listCollections' command arguments.
- function listCollections(database, args) {
- var args = args || {};
- var failMsg = "'listCollections' command failed";
- var res = assert.commandWorked(database.runCommand("listCollections", args), failMsg);
- return res.cursor.firstBatch;
- }
-
- // Returns a list of 'drop-pending' collections. The collection names should be of the
- // format "system.drop.<optime>.<collectionName>", where 'optime' is the optime of the
- // collection drop operation, encoded as a string, and 'collectionName' is the original
- // collection name.
- function listDropPendingCollections(database) {
- var pendingDropRegex = new RegExp("system\\.drop\\..*\\." + collNameToDrop + "$");
- var collections = listCollections(database, {includePendingDrops: true});
- return collections.filter(c => pendingDropRegex.test(c.name));
- }
-
- // Returns a list of all collection names in a given database.
- function listCollectionNames(database, args) {
- return listCollections(database, args).map(c => c.name);
- }
-
- // Sets a fail point on a specified node.
- function setFailPoint(node, failpoint, mode) {
- assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: mode}));
- }
+"use strict";
+
+load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
+load('jstests/libs/check_log.js');
+
+// Returns a list of all collections in a given database. Use 'args' as the
+// 'listCollections' command arguments.
+function listCollections(database, args) {
+ var args = args || {};
+ var failMsg = "'listCollections' command failed";
+ var res = assert.commandWorked(database.runCommand("listCollections", args), failMsg);
+ return res.cursor.firstBatch;
+}
+
+// Returns a list of 'drop-pending' collections. The collection names should be of the
+// format "system.drop.<optime>.<collectionName>", where 'optime' is the optime of the
+// collection drop operation, encoded as a string, and 'collectionName' is the original
+// collection name.
+function listDropPendingCollections(database) {
+ var pendingDropRegex = new RegExp("system\\.drop\\..*\\." + collNameToDrop + "$");
+ var collections = listCollections(database, {includePendingDrops: true});
+ return collections.filter(c => pendingDropRegex.test(c.name));
+}
+
+// Returns a list of all collection names in a given database.
+function listCollectionNames(database, args) {
+ return listCollections(database, args).map(c => c.name);
+}
+
+// Sets a fail point on a specified node.
+function setFailPoint(node, failpoint, mode) {
+ assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: mode}));
+}
+
+var dbNameToDrop = 'dbToDrop';
+var replTest = new ReplSetTest({nodes: [{}, {}, {arbiter: true}]});
+
+// Initiate the replica set.
+replTest.startSet();
+replTest.initiate();
+replTest.awaitReplication();
+
+var primary = replTest.getPrimary();
+var secondary = replTest.getSecondary();
+
+var dbToDrop = primary.getDB(dbNameToDrop);
+var collNameToDrop = "collectionToDrop";
+
+// Create the collection that will be dropped and let it replicate.
+var collToDrop = dbToDrop.getCollection(collNameToDrop);
+assert.writeOK(
+ collToDrop.insert({_id: 0}, {writeConcern: {w: 2, wtimeout: replTest.kDefaultTimeoutMS}}));
+assert.eq(1, collToDrop.find().itcount());
+
+// Pause application on secondary so that commit point doesn't advance, meaning that a dropped
+// database on the primary will remain in 'drop-pending' state.
+jsTestLog("Pausing oplog application on the secondary node.");
+setFailPoint(secondary, "rsSyncApplyStop", "alwaysOn");
+
+// Make sure the collection was created.
+assert.contains(collNameToDrop,
+ listCollectionNames(dbToDrop),
+ "Collection '" + collNameToDrop + "' wasn't created properly");
+/**
+ * DROP DATABASE 'Collections' PHASE
+ */
+
+// Drop the database on the primary.
+var dropDatabaseFn = function() {
var dbNameToDrop = 'dbToDrop';
- var replTest = new ReplSetTest({nodes: [{}, {}, {arbiter: true}]});
-
- // Initiate the replica set.
- replTest.startSet();
- replTest.initiate();
- replTest.awaitReplication();
-
- var primary = replTest.getPrimary();
- var secondary = replTest.getSecondary();
-
- var dbToDrop = primary.getDB(dbNameToDrop);
- var collNameToDrop = "collectionToDrop";
-
- // Create the collection that will be dropped and let it replicate.
- var collToDrop = dbToDrop.getCollection(collNameToDrop);
- assert.writeOK(
- collToDrop.insert({_id: 0}, {writeConcern: {w: 2, wtimeout: replTest.kDefaultTimeoutMS}}));
- assert.eq(1, collToDrop.find().itcount());
-
- // Pause application on secondary so that commit point doesn't advance, meaning that a dropped
- // database on the primary will remain in 'drop-pending' state.
- jsTestLog("Pausing oplog application on the secondary node.");
- setFailPoint(secondary, "rsSyncApplyStop", "alwaysOn");
-
- // Make sure the collection was created.
- assert.contains(collNameToDrop,
- listCollectionNames(dbToDrop),
- "Collection '" + collNameToDrop + "' wasn't created properly");
-
- /**
- * DROP DATABASE 'Collections' PHASE
- */
-
- // Drop the database on the primary.
- var dropDatabaseFn = function() {
- var dbNameToDrop = 'dbToDrop';
- var primary = db.getMongo();
- jsTestLog(
- 'Dropping database ' + dbNameToDrop + ' on primary node ' + primary.host +
- '. This command will block because oplog application is paused on the secondary.');
- var dbToDrop = db.getSiblingDB(dbNameToDrop);
- assert.commandWorked(dbToDrop.dropDatabase());
- jsTestLog('Database ' + dbNameToDrop + ' successfully dropped on primary node ' +
- primary.host);
- };
- var dropDatabaseProcess = startParallelShell(dropDatabaseFn, primary.port);
-
- // Check that primary has started two phase drop of the collection.
- jsTestLog('Waiting for primary ' + primary.host + ' to prepare two phase drop of collection ' +
+ var primary = db.getMongo();
+ jsTestLog('Dropping database ' + dbNameToDrop + ' on primary node ' + primary.host +
+ '. This command will block because oplog application is paused on the secondary.');
+ var dbToDrop = db.getSiblingDB(dbNameToDrop);
+ assert.commandWorked(dbToDrop.dropDatabase());
+ jsTestLog('Database ' + dbNameToDrop + ' successfully dropped on primary node ' + primary.host);
+};
+var dropDatabaseProcess = startParallelShell(dropDatabaseFn, primary.port);
+
+// Check that primary has started two phase drop of the collection.
+jsTestLog('Waiting for primary ' + primary.host + ' to prepare two phase drop of collection ' +
+ collToDrop.getFullName());
+assert.soonNoExcept(
+ function() {
+ return collToDrop.find().itcount() == 0;
+ },
+ 'Primary ' + primary.host + ' failed to prepare two phase drop of collection ' +
+ collToDrop.getFullName());
+
+// 'collToDrop' is no longer visible with its original name. If 'system.drop' two phase drops
+// are supported by the storage engine, check for the drop-pending namespace using
+// listCollections.
+const supportsDropPendingNamespaces =
+ TwoPhaseDropCollectionTest.supportsDropPendingNamespaces(replTest);
+if (supportsDropPendingNamespaces) {
+ var dropPendingCollections = listDropPendingCollections(dbToDrop);
+ assert.eq(1,
+ dropPendingCollections.length,
+ "Collection was not found in the 'system.drop' namespace. " +
+ "Full drop-pending collection list: " + tojson(dropPendingCollections));
+ jsTestLog('Primary ' + primary.host + ' successfully started two phase drop of collection ' +
collToDrop.getFullName());
- assert.soonNoExcept(
- function() {
- return collToDrop.find().itcount() == 0;
- },
- 'Primary ' + primary.host + ' failed to prepare two phase drop of collection ' +
- collToDrop.getFullName());
-
- // 'collToDrop' is no longer visible with its original name. If 'system.drop' two phase drops
- // are supported by the storage engine, check for the drop-pending namespace using
- // listCollections.
- const supportsDropPendingNamespaces =
- TwoPhaseDropCollectionTest.supportsDropPendingNamespaces(replTest);
- if (supportsDropPendingNamespaces) {
- var dropPendingCollections = listDropPendingCollections(dbToDrop);
- assert.eq(1,
- dropPendingCollections.length,
- "Collection was not found in the 'system.drop' namespace. " +
- "Full drop-pending collection list: " + tojson(dropPendingCollections));
- jsTestLog('Primary ' + primary.host +
- ' successfully started two phase drop of collection ' + collToDrop.getFullName());
- }
-
- // Commands that manipulate the database being dropped or perform destructive catalog operations
- // should fail with the DatabaseDropPending error code while the database is in a drop-pending
- // state.
+}
+
+// Commands that manipulate the database being dropped or perform destructive catalog operations
+// should fail with the DatabaseDropPending error code while the database is in a drop-pending
+// state.
+assert.commandFailedWithCode(
+ dbToDrop.createCollection('collectionToCreateWhileDroppingDatabase'),
+ ErrorCodes.DatabaseDropPending,
+ 'collection creation should fail while we are in the process of dropping the database');
+
+// restartCatalog can only detect that a database is in a drop-pending state when 'system.drop'
+// namespaces are supported. Since 4.2, dropped collections are managed internally by the
+// storage engine. See serverStatus().
+if (supportsDropPendingNamespaces) {
assert.commandFailedWithCode(
- dbToDrop.createCollection('collectionToCreateWhileDroppingDatabase'),
+ dbToDrop.adminCommand('restartCatalog'),
ErrorCodes.DatabaseDropPending,
- 'collection creation should fail while we are in the process of dropping the database');
-
- // restartCatalog can only detect that a database is in a drop-pending state when 'system.drop'
- // namespaces are supported. Since 4.2, dropped collections are managed internally by the
- // storage engine. See serverStatus().
- if (supportsDropPendingNamespaces) {
- assert.commandFailedWithCode(
- dbToDrop.adminCommand('restartCatalog'),
- ErrorCodes.DatabaseDropPending,
- 'restartCatalog should fail if any databases are marked drop-pending');
- } else {
- // Drop-pending idents are known only to the storage engine and will be ignored by
- // restartCatalog.
- assert.commandWorked(dbToDrop.adminCommand('restartCatalog'));
- }
-
- /**
- * DROP DATABASE 'Database' PHASE
- */
-
- // Let the secondary apply the collection drop operation, so that the replica set commit point
- // will advance, and the 'Database' phase of the database drop will complete on the primary.
- jsTestLog("Restarting oplog application on the secondary node.");
- setFailPoint(secondary, "rsSyncApplyStop", "off");
-
- jsTestLog("Waiting for collection drop operation to replicate to all nodes.");
- replTest.awaitReplication();
-
- // Make sure the collection has been fully dropped. It should not appear as
- // a normal collection or under the 'system.drop' namespace any longer. Physical collection
- // drops may happen asynchronously, any time after the drop operation is committed, so we wait
- // to make sure the collection is eventually dropped.
- assert.soonNoExcept(function() {
- var dropPendingCollections = listDropPendingCollections(dbToDrop);
- jsTestLog('Drop pending collections: ' + tojson(dropPendingCollections));
- return dropPendingCollections.length == 0;
- });
-
- jsTestLog('Waiting for dropDatabase command on ' + primary.host + ' to complete.');
- var exitCode = dropDatabaseProcess();
-
- let db = primary.getDB(dbNameToDrop);
- checkLog.contains(db.getMongo(), "dropping collection: " + dbNameToDrop + "." + collNameToDrop);
- checkLog.contains(db.getMongo(), "dropped 1 collection(s)");
-
- assert.eq(0, exitCode, 'dropDatabase command on ' + primary.host + ' failed.');
- jsTestLog('Completed dropDatabase command on ' + primary.host);
-
- replTest.stopSet();
+ 'restartCatalog should fail if any databases are marked drop-pending');
+} else {
+ // Drop-pending idents are known only to the storage engine and will be ignored by
+ // restartCatalog.
+ assert.commandWorked(dbToDrop.adminCommand('restartCatalog'));
+}
+
+/**
+ * DROP DATABASE 'Database' PHASE
+ */
+
+// Let the secondary apply the collection drop operation, so that the replica set commit point
+// will advance, and the 'Database' phase of the database drop will complete on the primary.
+jsTestLog("Restarting oplog application on the secondary node.");
+setFailPoint(secondary, "rsSyncApplyStop", "off");
+
+jsTestLog("Waiting for collection drop operation to replicate to all nodes.");
+replTest.awaitReplication();
+
+// Make sure the collection has been fully dropped. It should not appear as
+// a normal collection or under the 'system.drop' namespace any longer. Physical collection
+// drops may happen asynchronously, any time after the drop operation is committed, so we wait
+// to make sure the collection is eventually dropped.
+assert.soonNoExcept(function() {
+ var dropPendingCollections = listDropPendingCollections(dbToDrop);
+ jsTestLog('Drop pending collections: ' + tojson(dropPendingCollections));
+ return dropPendingCollections.length == 0;
+});
+
+jsTestLog('Waiting for dropDatabase command on ' + primary.host + ' to complete.');
+var exitCode = dropDatabaseProcess();
+
+let db = primary.getDB(dbNameToDrop);
+checkLog.contains(db.getMongo(), "dropping collection: " + dbNameToDrop + "." + collNameToDrop);
+checkLog.contains(db.getMongo(), "dropped 1 collection(s)");
+
+assert.eq(0, exitCode, 'dropDatabase command on ' + primary.host + ' failed.');
+jsTestLog('Completed dropDatabase command on ' + primary.host);
+
+replTest.stopSet();
}());
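The pattern above (pause replication with a failpoint, run the blocking command in a parallel shell, unpause, then join the shell) recurs throughout this directory. A condensed sketch, assuming the primary, secondary and setFailPoint() defined in this test:

// Sketch: run a command that blocks on replication in a parallel shell.
setFailPoint(secondary, 'rsSyncApplyStop', 'alwaysOn');
const awaitShell = startParallelShell(function() {
    assert.commandWorked(db.getSiblingDB('dbToDrop').dropDatabase());
}, primary.port);
setFailPoint(secondary, 'rsSyncApplyStop', 'off');
assert.eq(0, awaitShell(), 'parallel dropDatabase failed');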
diff --git a/jstests/replsets/drop_db.js b/jstests/replsets/drop_db.js
index 69f34eba059..49ee3e04406 100644
--- a/jstests/replsets/drop_db.js
+++ b/jstests/replsets/drop_db.js
@@ -4,57 +4,56 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- function checkWriteConcern(testFn, checkFn) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+function checkWriteConcern(testFn, checkFn) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- const sentinel = {};
- let cmdObjSeen = sentinel;
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- try {
- assert.doesNotThrow(testFn);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
-
- if (cmdObjSeen == sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " + testFn.toString());
- }
+ try {
+ assert.doesNotThrow(testFn);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
+ }
- checkFn(cmdObjSeen);
+ if (cmdObjSeen == sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + testFn.toString());
}
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const dbName = "dbDrop";
- const collName = "coll";
- const primaryDB = rst.getPrimary().getDB(dbName);
-
- primaryDB.createCollection(collName);
- checkWriteConcern(() => assert.commandWorked(primaryDB.dropDatabase({w: "majority"})),
- (cmdObj) => {
- assert.eq(cmdObj.writeConcern, {w: "majority"});
- });
-
- primaryDB.createCollection(collName);
- checkWriteConcern(() => assert.commandWorked(primaryDB.dropDatabase({w: 1})), (cmdObj) => {
- assert.eq(cmdObj.writeConcern, {w: 1});
- });
-
- primaryDB.createCollection(collName);
- checkWriteConcern(() => assert.commandFailedWithCode(primaryDB.dropDatabase({w: 100000}),
- ErrorCodes.UnsatisfiableWriteConcern),
- (cmdObj) => {
- assert.eq(cmdObj.writeConcern, {w: 100000});
- });
-
- rst.stopSet();
+ checkFn(cmdObjSeen);
+}
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const dbName = "dbDrop";
+const collName = "coll";
+const primaryDB = rst.getPrimary().getDB(dbName);
+
+primaryDB.createCollection(collName);
+checkWriteConcern(() => assert.commandWorked(primaryDB.dropDatabase({w: "majority"})), (cmdObj) => {
+ assert.eq(cmdObj.writeConcern, {w: "majority"});
+});
+
+primaryDB.createCollection(collName);
+checkWriteConcern(() => assert.commandWorked(primaryDB.dropDatabase({w: 1})), (cmdObj) => {
+ assert.eq(cmdObj.writeConcern, {w: 1});
+});
+
+primaryDB.createCollection(collName);
+checkWriteConcern(() => assert.commandFailedWithCode(primaryDB.dropDatabase({w: 100000}),
+ ErrorCodes.UnsatisfiableWriteConcern),
+ (cmdObj) => {
+ assert.eq(cmdObj.writeConcern, {w: 100000});
+ });
+
+rst.stopSet();
})();
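The checkWriteConcern() spy above works because the shell helpers all funnel through Mongo.prototype.runCommand. Reduced to its core, with ping standing in as an arbitrary command:

// Sketch: capture the last command object sent over this connection.
const original = Mongo.prototype.runCommand;
let seen = null;
Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
    seen = cmdObj;
    return original.apply(this, arguments);
};
try {
    assert.commandWorked(primaryDB.runCommand({ping: 1}));
} finally {
    Mongo.prototype.runCommand = original;  // always restore the real method
}
assert.eq(1, seen.ping);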
diff --git a/jstests/replsets/drop_oplog.js b/jstests/replsets/drop_oplog.js
index 2ba8dc44c72..a53da5ae483 100644
--- a/jstests/replsets/drop_oplog.js
+++ b/jstests/replsets/drop_oplog.js
@@ -2,35 +2,35 @@
// prohibited in a replset.
(function() {
- "use strict";
- let rt = new ReplSetTest({name: "drop_oplog", nodes: 1, oplogSize: 30});
+"use strict";
+let rt = new ReplSetTest({name: "drop_oplog", nodes: 1, oplogSize: 30});
- let nodes = rt.startSet();
- rt.initiate();
- let master = rt.getPrimary();
- let localDB = master.getDB('local');
+let nodes = rt.startSet();
+rt.initiate();
+let master = rt.getPrimary();
+let localDB = master.getDB('local');
- let threw = false;
+let threw = false;
- let ret = assert.commandFailed(localDB.runCommand({drop: 'oplog.rs'}));
- assert.eq('can\'t drop live oplog while replicating', ret.errmsg);
+let ret = assert.commandFailed(localDB.runCommand({drop: 'oplog.rs'}));
+assert.eq('can\'t drop live oplog while replicating', ret.errmsg);
- let dropOutput = localDB.dropDatabase();
- assert.eq(dropOutput.ok, 0);
- assert.eq(dropOutput.errmsg, "Cannot drop 'local' database while replication is active");
+let dropOutput = localDB.dropDatabase();
+assert.eq(dropOutput.ok, 0);
+assert.eq(dropOutput.errmsg, "Cannot drop 'local' database while replication is active");
- let adminDB = master.getDB('admin');
- dropOutput = adminDB.dropDatabase();
- assert.eq(dropOutput.ok, 0);
- assert.eq(dropOutput.errmsg, "Dropping the 'admin' database is prohibited.");
+let adminDB = master.getDB('admin');
+dropOutput = adminDB.dropDatabase();
+assert.eq(dropOutput.ok, 0);
+assert.eq(dropOutput.errmsg, "Dropping the 'admin' database is prohibited.");
- let renameOutput = localDB.oplog.rs.renameCollection("poison");
- assert.eq(renameOutput.ok, 0);
- assert.eq(renameOutput.errmsg, "can't rename live oplog while replicating");
+let renameOutput = localDB.oplog.rs.renameCollection("poison");
+assert.eq(renameOutput.ok, 0);
+assert.eq(renameOutput.errmsg, "can't rename live oplog while replicating");
- assert.writeOK(localDB.foo.insert({a: 1}));
- renameOutput = localDB.foo.renameCollection("oplog.rs");
- assert.eq(renameOutput.ok, 0);
- assert.eq(renameOutput.errmsg, "can't rename to live oplog while replicating");
- rt.stopSet();
+assert.writeOK(localDB.foo.insert({a: 1}));
+renameOutput = localDB.foo.renameCollection("oplog.rs");
+assert.eq(renameOutput.ok, 0);
+assert.eq(renameOutput.errmsg, "can't rename to live oplog while replicating");
+rt.stopSet();
}());
diff --git a/jstests/replsets/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js b/jstests/replsets/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js
index 4ba1be0dbdc..4c95e137ea4 100644
--- a/jstests/replsets/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js
+++ b/jstests/replsets/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js
@@ -11,34 +11,33 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/storage_engine_utils.js");
+load("jstests/libs/storage_engine_utils.js");
- const rt = new ReplSetTest({
- name: "drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command",
- nodes: 1
- });
+const rt = new ReplSetTest({
+ name: "drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command",
+ nodes: 1
+});
- // Start as a standalone node.
- rt.start(0, {noReplSet: true});
+// Start as a standalone node.
+rt.start(0, {noReplSet: true});
- let master = rt.getPrimary();
- let localDB = master.getDB('local');
+let master = rt.getPrimary();
+let localDB = master.getDB('local');
- // Standalone nodes don't start with an oplog; create one. The size of the oplog doesn't
- // matter. We are capping the oplog because some storage engines do not allow the creation
- // of uncapped oplog collections.
- assert.commandWorked(localDB.runCommand({create: 'oplog.rs', capped: true, size: 1000}));
+// Standalone nodes don't start with an oplog; create one. The size of the oplog doesn't
+// matter. We are capping the oplog because some storage engines do not allow the creation
+// of uncapped oplog collections.
+assert.commandWorked(localDB.runCommand({create: 'oplog.rs', capped: true, size: 1000}));
- if (storageEngineIsWiredTiger()) {
- const ret = assert.commandFailed(localDB.runCommand({drop: 'oplog.rs'}));
- assert.eq("can't drop oplog on storage engines that support replSetResizeOplog command",
- ret.errmsg);
- } else {
- assert.commandWorked(localDB.runCommand({drop: 'oplog.rs'}));
- }
-
- rt.stopSet();
+if (storageEngineIsWiredTiger()) {
+ const ret = assert.commandFailed(localDB.runCommand({drop: 'oplog.rs'}));
+ assert.eq("can't drop oplog on storage engines that support replSetResizeOplog command",
+ ret.errmsg);
+} else {
+ assert.commandWorked(localDB.runCommand({drop: 'oplog.rs'}));
+}
+rt.stopSet();
}());
\ No newline at end of file
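storageEngineIsWiredTiger() from storage_engine_utils.js, loaded above, amounts to inspecting serverStatus. An illustrative check using this test's master connection:

// Sketch: report which storage engine the node is running.
const engine = master.getDB('admin').serverStatus().storageEngine.name;
jsTestLog('Running against storage engine: ' + engine);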
diff --git a/jstests/replsets/election_handoff_basic.js b/jstests/replsets/election_handoff_basic.js
index c11a60612a2..2c1e27b6ece 100644
--- a/jstests/replsets/election_handoff_basic.js
+++ b/jstests/replsets/election_handoff_basic.js
@@ -5,22 +5,24 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
- const testName = "election_handoff_vanilla";
- const numNodes = 2;
- const rst = new ReplSetTest({name: testName, nodes: numNodes});
- const nodes = rst.nodeList();
- rst.startSet();
+const testName = "election_handoff_vanilla";
+const numNodes = 2;
+const rst = new ReplSetTest({name: testName, nodes: numNodes});
+const nodes = rst.nodeList();
+rst.startSet();
- // Make sure there are no election timeouts firing for the duration of the test. This helps
- // ensure that the test will only pass if the election handoff succeeds.
- const config = rst.getReplSetConfig();
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Make sure there are no election timeouts firing for the duration of the test. This helps
+// ensure that the test will only pass if the election handoff succeeds.
+const config = rst.getReplSetConfig();
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- ElectionHandoffTest.testElectionHandoff(rst, 0, 1);
+ElectionHandoffTest.testElectionHandoff(rst, 0, 1);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
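The 12-hour electionTimeoutMillis above effectively disables timeout-driven elections, so the test only passes if the explicit handoff elects the target node. A sketch of verifying the setting took effect, using the test's rst:

// Sketch: confirm the long election timeout was installed.
const cfg = assert.commandWorked(rst.getPrimary().adminCommand({replSetGetConfig: 1})).config;
assert.eq(12 * 60 * 60 * 1000, cfg.settings.electionTimeoutMillis);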
diff --git a/jstests/replsets/election_handoff_flip.js b/jstests/replsets/election_handoff_flip.js
index 6e6c6f7bd66..c2576023048 100644
--- a/jstests/replsets/election_handoff_flip.js
+++ b/jstests/replsets/election_handoff_flip.js
@@ -4,24 +4,26 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
- const testName = "election_handoff_flip";
- const numNodes = 2;
- const rst = new ReplSetTest({name: testName, nodes: numNodes});
- const nodes = rst.nodeList();
- rst.startSet();
+const testName = "election_handoff_flip";
+const numNodes = 2;
+const rst = new ReplSetTest({name: testName, nodes: numNodes});
+const nodes = rst.nodeList();
+rst.startSet();
- // Make sure there are no election timeouts firing for the duration of the test. This helps
- // ensure that the test will only pass if the election handoff succeeds.
- const config = rst.getReplSetConfig();
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Make sure there are no election timeouts firing for the duration of the test. This helps
+// ensure that the test will only pass if the election handoff succeeds.
+const config = rst.getReplSetConfig();
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- ElectionHandoffTest.testElectionHandoff(rst, 0, 1);
- sleep(ElectionHandoffTest.stepDownPeriodSecs * 1000);
- ElectionHandoffTest.testElectionHandoff(rst, 1, 0);
+ElectionHandoffTest.testElectionHandoff(rst, 0, 1);
+sleep(ElectionHandoffTest.stepDownPeriodSecs * 1000);
+ElectionHandoffTest.testElectionHandoff(rst, 1, 0);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/election_handoff_higher_priority.js b/jstests/replsets/election_handoff_higher_priority.js
index 78a866a1201..12ac4914a40 100644
--- a/jstests/replsets/election_handoff_higher_priority.js
+++ b/jstests/replsets/election_handoff_higher_priority.js
@@ -6,26 +6,28 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
- const testName = "election_handoff_higher_priority";
- const numNodes = 3;
- const rst = new ReplSetTest({name: testName, nodes: numNodes});
- const nodes = rst.nodeList();
- rst.startSet();
+const testName = "election_handoff_higher_priority";
+const numNodes = 3;
+const rst = new ReplSetTest({name: testName, nodes: numNodes});
+const nodes = rst.nodeList();
+rst.startSet();
- const config = rst.getReplSetConfig();
- config.members[0].priority = 3;
- config.members[1].priority = 1;
- config.members[2].priority = 2;
+const config = rst.getReplSetConfig();
+config.members[0].priority = 3;
+config.members[1].priority = 1;
+config.members[2].priority = 2;
- // Make sure there are no election timeouts firing for the duration of the test. This helps
- // ensure that the test will only pass if the election handoff succeeds.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Make sure there are no election timeouts firing for the duration of the test. This helps
+// ensure that the test will only pass if the election handoff succeeds.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- ElectionHandoffTest.testElectionHandoff(rst, 0, 2);
+ElectionHandoffTest.testElectionHandoff(rst, 0, 2);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/election_handoff_one_unelectable.js b/jstests/replsets/election_handoff_one_unelectable.js
index 97546cbb1ea..970b605197c 100644
--- a/jstests/replsets/election_handoff_one_unelectable.js
+++ b/jstests/replsets/election_handoff_one_unelectable.js
@@ -6,24 +6,26 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
- const testName = "election_handoff_one_unelectable";
- const numNodes = 3;
- const rst = new ReplSetTest({name: testName, nodes: numNodes});
- const nodes = rst.nodeList();
- rst.startSet();
+const testName = "election_handoff_one_unelectable";
+const numNodes = 3;
+const rst = new ReplSetTest({name: testName, nodes: numNodes});
+const nodes = rst.nodeList();
+rst.startSet();
- const config = rst.getReplSetConfig();
- config.members[1].priority = 0;
+const config = rst.getReplSetConfig();
+config.members[1].priority = 0;
- // Make sure there are no election timeouts firing for the duration of the test. This helps
- // ensure that the test will only pass if the election handoff succeeds.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Make sure there are no election timeouts firing for the duration of the test. This helps
+// ensure that the test will only pass if the election handoff succeeds.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- ElectionHandoffTest.testElectionHandoff(rst, 0, 2);
+ElectionHandoffTest.testElectionHandoff(rst, 0, 2);
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/election_handoff_via_signal.js b/jstests/replsets/election_handoff_via_signal.js
index 4bc58c95d40..bca8d4b4991 100644
--- a/jstests/replsets/election_handoff_via_signal.js
+++ b/jstests/replsets/election_handoff_via_signal.js
@@ -4,22 +4,24 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
- const testName = "election_handoff_via_signal";
- const numNodes = 3;
- const rst = new ReplSetTest({name: testName, nodes: numNodes});
- const nodes = rst.nodeList();
- rst.startSet();
+const testName = "election_handoff_via_signal";
+const numNodes = 3;
+const rst = new ReplSetTest({name: testName, nodes: numNodes});
+const nodes = rst.nodeList();
+rst.startSet();
- // Make sure there are no election timeouts firing for the duration of the test. This helps
- // ensure that the test will only pass if the election handoff succeeds.
- const config = rst.getReplSetConfig();
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Make sure there are no election timeouts firing for the duration of the test. This helps
+// ensure that the test will only pass if the election handoff succeeds.
+const config = rst.getReplSetConfig();
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- ElectionHandoffTest.testElectionHandoff(rst, 0, 1, {stepDownBySignal: true});
+ElectionHandoffTest.testElectionHandoff(rst, 0, 1, {stepDownBySignal: true});
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/emptycapped.js b/jstests/replsets/emptycapped.js
index b3aa6093be2..e15322935eb 100644
--- a/jstests/replsets/emptycapped.js
+++ b/jstests/replsets/emptycapped.js
@@ -1,98 +1,96 @@
// This tests the emptycapped command in a replica set.
(function() {
- "use strict";
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
-
- var primaryTestDB = rst.getPrimary().getDB('test');
- var primaryLocalDB = rst.getPrimary().getDB('local');
- var primaryAdminDB = rst.getPrimary().getDB('admin');
- var secondaryTestDB = rst.getSecondary().getDB('test');
-
- // Truncate a non-capped collection.
- assert.writeOK(primaryTestDB.noncapped.insert({x: 1}));
- assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'noncapped'}));
- assert.eq(primaryTestDB.noncapped.find().itcount(),
- 0,
- "Expected 0 documents to exist after emptying the collection");
-
- // Truncate a non-existent collection on a non-existent database.
- assert.commandWorked(rst.getPrimary().getDB('nonexistent').dropDatabase());
- assert.commandFailedWithCode(
- rst.getPrimary().getDB('nonexistent').runCommand({emptycapped: 'nonexistent'}),
- ErrorCodes.NamespaceNotFound);
-
- // Truncate a non-existent collection.
- primaryTestDB.nonexistent.drop();
- assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: 'nonexistent'}),
- ErrorCodes.NamespaceNotFound);
-
- // Truncate a capped collection.
- assert.commandWorked(primaryTestDB.createCollection("capped", {capped: true, size: 4096}));
- assert.writeOK(primaryTestDB.capped.insert({}));
- assert.eq(
- primaryTestDB.capped.find().itcount(), 1, "Expected 1 document to exist after an insert");
- assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'capped'}));
- assert.eq(primaryTestDB.capped.find().itcount(),
- 0,
- "Expected 0 documents to exist after emptying the collection");
-
- // Truncate a capped collection on a secondary.
- assert.commandFailedWithCode(secondaryTestDB.runCommand({emptycapped: 'capped'}),
- ErrorCodes.NotMaster);
-
- // Truncate the oplog.
- assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "oplog.rs"}),
- ErrorCodes.OplogOperationUnsupported);
-
- // Test system collections, which cannot be truncated except system.profile.
-
- // Truncate the local system.js collection.
- assert.writeOK(primaryTestDB.system.js.insert({_id: "mystring", value: "var root = this;"}));
- assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: "system.js"}),
- ErrorCodes.IllegalOperation);
-
- // Truncate the system.profile collection.
- assert.commandWorked(
- primaryTestDB.createCollection("system.profile", {capped: true, size: 4096}));
- assert.commandWorked(primaryTestDB.runCommand({profile: 2}));
- assert.commandWorked(primaryTestDB.runCommand({emptycapped: "system.profile"}));
- assert.commandWorked(primaryTestDB.runCommand({profile: 0}));
- assert(primaryTestDB.system.profile.drop(), "Failed to drop the system.profile collection");
-
- // Truncate the local system.replset collection.
- assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "system.replset"}),
- ErrorCodes.IllegalOperation);
-
- // Test user & role management system collections.
- assert.commandWorked(primaryAdminDB.runCommand({
- createRole: "all1",
- privileges: [{resource: {db: "", collection: ""}, actions: ["anyAction"]}],
- roles: []
- }));
- assert.commandWorked(primaryAdminDB.runCommand(
- {createUser: "root2", pwd: "pwd", roles: [{role: "root", db: "admin"}]}));
-
- // TODO: Test system.backup_users & system.new_users.
-
- // Truncate the admin system.roles collection.
- assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.roles"}),
- ErrorCodes.IllegalOperation);
-
- // Truncate the admin system.users collection.
- assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.users"}),
- ErrorCodes.IllegalOperation);
-
- // Truncate the admin system.version collection.
- assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.version"}),
- ErrorCodes.IllegalOperation);
-
- // Truncate the local system.views collection.
- assert.commandWorked(primaryTestDB.runCommand(
- {create: "view1", viewOn: "collection", pipeline: [{$match: {}}]}));
- assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: "system.views"}),
- ErrorCodes.IllegalOperation);
- rst.stopSet();
+"use strict";
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+
+var primaryTestDB = rst.getPrimary().getDB('test');
+var primaryLocalDB = rst.getPrimary().getDB('local');
+var primaryAdminDB = rst.getPrimary().getDB('admin');
+var secondaryTestDB = rst.getSecondary().getDB('test');
+
+// Truncate a non-capped collection.
+assert.writeOK(primaryTestDB.noncapped.insert({x: 1}));
+assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'noncapped'}));
+assert.eq(primaryTestDB.noncapped.find().itcount(),
+ 0,
+ "Expected 0 documents to exist after emptying the collection");
+
+// Truncate a non-existent collection on a non-existent database.
+assert.commandWorked(rst.getPrimary().getDB('nonexistent').dropDatabase());
+assert.commandFailedWithCode(
+ rst.getPrimary().getDB('nonexistent').runCommand({emptycapped: 'nonexistent'}),
+ ErrorCodes.NamespaceNotFound);
+
+// Truncate a non-existent collection.
+primaryTestDB.nonexistent.drop();
+assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: 'nonexistent'}),
+ ErrorCodes.NamespaceNotFound);
+
+// Truncate a capped collection.
+assert.commandWorked(primaryTestDB.createCollection("capped", {capped: true, size: 4096}));
+assert.writeOK(primaryTestDB.capped.insert({}));
+assert.eq(primaryTestDB.capped.find().itcount(), 1, "Expected 1 document to exist after an insert");
+assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'capped'}));
+assert.eq(primaryTestDB.capped.find().itcount(),
+ 0,
+ "Expected 0 documents to exist after emptying the collection");
+
+// Truncate a capped collection on a secondary.
+assert.commandFailedWithCode(secondaryTestDB.runCommand({emptycapped: 'capped'}),
+ ErrorCodes.NotMaster);
+
+// Truncate the oplog.
+assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "oplog.rs"}),
+ ErrorCodes.OplogOperationUnsupported);
+
+// Test system collections, which cannot be truncated except system.profile.
+
+// Truncate the local system.js collection.
+assert.writeOK(primaryTestDB.system.js.insert({_id: "mystring", value: "var root = this;"}));
+assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: "system.js"}),
+ ErrorCodes.IllegalOperation);
+
+// Truncate the system.profile collection.
+assert.commandWorked(primaryTestDB.createCollection("system.profile", {capped: true, size: 4096}));
+assert.commandWorked(primaryTestDB.runCommand({profile: 2}));
+assert.commandWorked(primaryTestDB.runCommand({emptycapped: "system.profile"}));
+assert.commandWorked(primaryTestDB.runCommand({profile: 0}));
+assert(primaryTestDB.system.profile.drop(), "Failed to drop the system.profile collection");
+
+// Truncate the local system.replset collection.
+assert.commandFailedWithCode(primaryLocalDB.runCommand({emptycapped: "system.replset"}),
+ ErrorCodes.IllegalOperation);
+
+// Test user & role management system collections.
+assert.commandWorked(primaryAdminDB.runCommand({
+ createRole: "all1",
+ privileges: [{resource: {db: "", collection: ""}, actions: ["anyAction"]}],
+ roles: []
+}));
+assert.commandWorked(primaryAdminDB.runCommand(
+ {createUser: "root2", pwd: "pwd", roles: [{role: "root", db: "admin"}]}));
+
+// TODO: Test system.backup_users & system.new_users.
+
+// Truncate the admin system.roles collection.
+assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.roles"}),
+ ErrorCodes.IllegalOperation);
+
+// Truncate the admin system.users collection.
+assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.users"}),
+ ErrorCodes.IllegalOperation);
+
+// Truncate the admin system.version collection.
+assert.commandFailedWithCode(primaryAdminDB.runCommand({emptycapped: "system.version"}),
+ ErrorCodes.IllegalOperation);
+
+// Truncate the local system.views collection.
+assert.commandWorked(
+ primaryTestDB.runCommand({create: "view1", viewOn: "collection", pipeline: [{$match: {}}]}));
+assert.commandFailedWithCode(primaryTestDB.runCommand({emptycapped: "system.views"}),
+ ErrorCodes.IllegalOperation);
+rst.stopSet();
})();
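Unlike drop(), the test-only emptycapped command removes all documents while keeping the collection and its options. A short sketch against a throwaway collection (the name 'scratch' is illustrative):

// Sketch: emptycapped truncates but preserves collection options.
assert.commandWorked(primaryTestDB.createCollection('scratch', {capped: true, size: 4096}));
assert.writeOK(primaryTestDB.scratch.insert({}));
assert.commandWorked(primaryTestDB.runCommand({emptycapped: 'scratch'}));
assert.eq(0, primaryTestDB.scratch.find().itcount());
assert(primaryTestDB.scratch.stats().capped, 'capped option should survive truncation');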
diff --git a/jstests/replsets/failcommand_ignores_internal.js b/jstests/replsets/failcommand_ignores_internal.js
index 1d2a4f17e93..a1d6f2c82cd 100644
--- a/jstests/replsets/failcommand_ignores_internal.js
+++ b/jstests/replsets/failcommand_ignores_internal.js
@@ -1,35 +1,39 @@
// Tests that the "failCommand" failpoint ignores commands from internal clients: SERVER-34943.
// @tags: [requires_replication]
(function() {
- "use strict";
+"use strict";
- // Prevent elections.
- const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {votes: 0, priority: 0}}]});
- replTest.startSet();
- replTest.initiate();
- const primary = replTest.getPrimary();
- const testDB = primary.getDB("test_failcommand_ignores_internal");
+// Prevent elections.
+const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {votes: 0, priority: 0}}]});
+replTest.startSet();
+replTest.initiate();
+const primary = replTest.getPrimary();
+const testDB = primary.getDB("test_failcommand_ignores_internal");
- // Enough documents for three getMores.
- assert.commandWorked(testDB.collection.insertMany([{}, {}, {}]));
- const findReply = assert.commandWorked(testDB.runCommand({find: "collection", batchSize: 0}));
- const cursorId = findReply.cursor.id;
+// Enough documents for three getMores.
+assert.commandWorked(testDB.collection.insertMany([{}, {}, {}]));
+const findReply = assert.commandWorked(testDB.runCommand({find: "collection", batchSize: 0}));
+const cursorId = findReply.cursor.id;
- // Test failing twice with a particular error code.
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: {times: 2},
- data: {errorCode: ErrorCodes.BadValue, failCommands: ["getMore"]}
- }));
- const getMore = {getMore: cursorId, collection: "collection", batchSize: 1};
- assert.commandFailedWithCode(testDB.runCommand(getMore), ErrorCodes.BadValue);
+// Test failing twice with a particular error code.
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: {times: 2},
+ data: {errorCode: ErrorCodes.BadValue, failCommands: ["getMore"]}
+}));
+const getMore = {
+ getMore: cursorId,
+ collection: "collection",
+ batchSize: 1
+};
+assert.commandFailedWithCode(testDB.runCommand(getMore), ErrorCodes.BadValue);
- // Waits for secondaries to do getMores on the oplog, which should be ignored by failCommand.
- assert.commandWorked(testDB.collection.insertOne({}, {writeConcern: {w: 2}}));
+// Waits for secondaries to do getMores on the oplog, which should be ignored by failCommand.
+assert.commandWorked(testDB.collection.insertOne({}, {writeConcern: {w: 2}}));
- // Second getMore fails but third succeeds, because configureFailPoint was passed {times: 2}.
- assert.commandFailedWithCode(testDB.runCommand(getMore), ErrorCodes.BadValue);
- assert.commandWorked(testDB.runCommand(getMore));
+// Second getMore fails but third succeeds, because configureFailPoint was passed {times: 2}.
+assert.commandFailedWithCode(testDB.runCommand(getMore), ErrorCodes.BadValue);
+assert.commandWorked(testDB.runCommand(getMore));
- replTest.stopSet();
+replTest.stopSet();
}());
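By default the failCommand failpoint skips commands from internal clients, which is exactly what this test asserts. A hedged sketch of the opt-in path; failInternalCommands is an assumption about the failpoint's data contract, so verify it against the server version in use:

// Sketch (assumed data field): opt internal clients into failCommand.
assert.commandWorked(testDB.adminCommand({
    configureFailPoint: 'failCommand',
    mode: {times: 1},
    data: {
        errorCode: ErrorCodes.BadValue,
        failCommands: ['getMore'],
        failInternalCommands: true  // assumption: internal clients are skipped without this
    }
}));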
diff --git a/jstests/replsets/find_and_modify_wc.js b/jstests/replsets/find_and_modify_wc.js
index c8055d97ef5..236ddad1afb 100644
--- a/jstests/replsets/find_and_modify_wc.js
+++ b/jstests/replsets/find_and_modify_wc.js
@@ -2,78 +2,76 @@
// Tests writeConcerns with findAndModify command
//
(function() {
- 'use strict';
+'use strict';
- // Skip this test if running with the "wiredTiger" storage engine, since it requires
- // using 'nojournal' in a replica set, which is not supported when using WT.
- if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- // WT is currently the default engine so it is used when 'storageEngine' is not set.
- jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
- return;
- }
+// Skip this test if running with the "wiredTiger" storage engine, since it requires
+// using 'nojournal' in a replica set, which is not supported when using WT.
+if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
+ // WT is currently the default engine so it is used when 'storageEngine' is not set.
+ jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
+ return;
+}
- var nodeCount = 3;
- var rst = new ReplSetTest({nodes: nodeCount});
- rst.startSet({nojournal: ""});
- rst.initiate();
+var nodeCount = 3;
+var rst = new ReplSetTest({nodes: nodeCount});
+rst.startSet({nojournal: ""});
+rst.initiate();
- var primary = rst.getPrimary();
- var coll = primary.getCollection("test.find_and_modify_wc");
- coll.remove({});
+var primary = rst.getPrimary();
+var coll = primary.getCollection("test.find_and_modify_wc");
+coll.remove({});
- // insert some documents
- var docs = [];
- for (var i = 1; i <= 5; ++i) {
- docs.push({i: i, j: 2 * i});
- }
- var res =
- coll.runCommand({insert: coll.getName(), documents: docs, writeConcern: {w: nodeCount}});
- assert(res.ok);
- assert.eq(5, coll.find().itcount());
+// insert some documents
+var docs = [];
+for (var i = 1; i <= 5; ++i) {
+ docs.push({i: i, j: 2 * i});
+}
+var res = coll.runCommand({insert: coll.getName(), documents: docs, writeConcern: {w: nodeCount}});
+assert(res.ok);
+assert.eq(5, coll.find().itcount());
- // use for updates in subsequent runCommand calls
- var reqUpdate = {
- findAndModify: coll.getName(),
- query: {i: 3},
- update: {$inc: {j: 1}},
- writeConcern: {w: 'majority'}
- };
+// use for updates in subsequent runCommand calls
+var reqUpdate = {
+ findAndModify: coll.getName(),
+ query: {i: 3},
+ update: {$inc: {j: 1}},
+ writeConcern: {w: 'majority'}
+};
- // Verify findAndModify returns old document new: false
- var res = coll.runCommand(reqUpdate);
- assert(res.ok);
- assert(res.value);
- // (2 * res.value.i) == 6 == res.value.j (old document)
- assert.eq(2 * res.value.i, res.value.j);
- assert(!res.writeConcernError);
+// Verify findAndModify returns old document new: false
+var res = coll.runCommand(reqUpdate);
+assert(res.ok);
+assert(res.value);
+// (2 * res.value.i) == 6 == res.value.j (old document)
+assert.eq(2 * res.value.i, res.value.j);
+assert(!res.writeConcernError);
- // Verify findAndModify returns new document with new: true
- reqUpdate.new = true;
- res = coll.runCommand(reqUpdate);
- assert(res.ok);
- assert(res.value);
- // (2 * res.value.i + 2) == 8 == res.value.j (new document after two updates)
- assert.eq(2 * res.value.i + 2, res.value.j);
- assert(!res.writeConcernError);
-
- // Verify findAndModify remove works
- res = coll.runCommand(
- {findAndModify: coll.getName(), sort: {i: 1}, remove: true, writeConcern: {w: nodeCount}});
- assert.eq(res.value.i, 1);
- assert.eq(coll.find().itcount(), 4);
- assert(!res.writeConcernError);
+// Verify findAndModify returns new document with new: true
+reqUpdate.new = true;
+res = coll.runCommand(reqUpdate);
+assert(res.ok);
+assert(res.value);
+// (2 * res.value.i + 2) == 8 == res.value.j (new document after two updates)
+assert.eq(2 * res.value.i + 2, res.value.j);
+assert(!res.writeConcernError);
- // Verify findAndModify returns writeConcernError
- // when given invalid writeConcerns
- [{w: 'invalid'}, {w: nodeCount + 1}].forEach(function(wc) {
- reqUpdate.writeConcern = wc;
- res = coll.runCommand(reqUpdate);
+// Verify findAndModify remove works
+res = coll.runCommand(
+ {findAndModify: coll.getName(), sort: {i: 1}, remove: true, writeConcern: {w: nodeCount}});
+assert.eq(res.value.i, 1);
+assert.eq(coll.find().itcount(), 4);
+assert(!res.writeConcernError);
- assert(res.writeConcernError);
- assert(res.writeConcernError.code);
- assert(res.writeConcernError.errmsg);
- });
+// Verify findAndModify returns writeConcernError
+// when given invalid writeConcerns
+[{w: 'invalid'}, {w: nodeCount + 1}].forEach(function(wc) {
+ reqUpdate.writeConcern = wc;
+ res = coll.runCommand(reqUpdate);
- rst.stopSet();
+ assert(res.writeConcernError);
+ assert(res.writeConcernError.code);
+ assert(res.writeConcernError.errmsg);
+});
+rst.stopSet();
})();
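
For reference, the command shape this test drives (findAndModify via runCommand with an
explicit writeConcern, checking writeConcernError on the response) reduces to a short
standalone sketch; the collection name and write concern value below are illustrative:

    // Minimal sketch, assuming 'db' is connected to a replica set primary.
    var res = db.runCommand({
        findAndModify: "find_and_modify_wc",
        query: {i: 3},
        update: {$inc: {j: 1}},
        new: true,  // return the post-update document instead of the original
        writeConcern: {w: "majority"}
    });
    assert(res.ok);
    if (res.writeConcernError) {
        // The update itself may still have applied; only the write concern wait failed.
        print("write concern failed: " + res.writeConcernError.errmsg);
    }
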
diff --git a/jstests/replsets/force_sync_source_candidate.js b/jstests/replsets/force_sync_source_candidate.js
index 4be7b3bb668..c359c4ac668 100644
--- a/jstests/replsets/force_sync_source_candidate.js
+++ b/jstests/replsets/force_sync_source_candidate.js
@@ -5,36 +5,35 @@
*/
(function() {
- "use strict";
-
- const failpointName = "forceSyncSourceCandidate";
-
- const rst = new ReplSetTest({
- nodes:
- [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- // Allow many initial sync attempts. Initial sync may fail if the sync source does not have
- // an oplog yet because it has not conducted its own initial sync yet.
- // We turn on the noop writer to encourage successful sync source selection.
- nodeOptions: {setParameter: {numInitialSyncAttempts: 100, writePeriodicNoops: true}}
- });
- const nodes = rst.startSet();
-
- function setFailPoint(node, syncSource) {
- const dataObj = {hostAndPort: syncSource.host};
- assert.commandWorked(node.adminCommand(
- {configureFailPoint: failpointName, mode: "alwaysOn", data: dataObj}));
- }
-
- setFailPoint(nodes[1], nodes[0]);
- setFailPoint(nodes[2], nodes[1]);
- setFailPoint(nodes[3], nodes[2]);
-
- rst.initiate();
- const primary = rst.getPrimary();
-
- rst.awaitSyncSource(nodes[1], nodes[0]);
- rst.awaitSyncSource(nodes[2], nodes[1]);
- rst.awaitSyncSource(nodes[3], nodes[2]);
-
- rst.stopSet();
+"use strict";
+
+const failpointName = "forceSyncSourceCandidate";
+
+const rst = new ReplSetTest({
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+    // Allow many initial sync attempts. Initial sync may fail if the sync source does not yet
+    // have an oplog because it has not completed its own initial sync.
+    // We turn on the noop writer to encourage successful sync source selection.
+ nodeOptions: {setParameter: {numInitialSyncAttempts: 100, writePeriodicNoops: true}}
+});
+const nodes = rst.startSet();
+
+function setFailPoint(node, syncSource) {
+ const dataObj = {hostAndPort: syncSource.host};
+ assert.commandWorked(
+ node.adminCommand({configureFailPoint: failpointName, mode: "alwaysOn", data: dataObj}));
+}
+
+setFailPoint(nodes[1], nodes[0]);
+setFailPoint(nodes[2], nodes[1]);
+setFailPoint(nodes[3], nodes[2]);
+
+rst.initiate();
+const primary = rst.getPrimary();
+
+rst.awaitSyncSource(nodes[1], nodes[0]);
+rst.awaitSyncSource(nodes[2], nodes[1]);
+rst.awaitSyncSource(nodes[3], nodes[2]);
+
+rst.stopSet();
})();
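
The setFailPoint() helper above is a thin wrapper around the generic configureFailPoint
command; a hedged standalone version looks like this (only the failpoint name and the data
payload come from the test itself):

    // Force 'node' to pick 'syncSource' the next time it selects a sync source.
    function forceSyncSource(node, syncSource) {
        assert.commandWorked(node.adminCommand({
            configureFailPoint: "forceSyncSourceCandidate",
            mode: "alwaysOn",
            data: {hostAndPort: syncSource.host}
        }));
    }
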
diff --git a/jstests/replsets/freeze_timeout.js b/jstests/replsets/freeze_timeout.js
index 498d190b1ec..0bf193bbdc6 100644
--- a/jstests/replsets/freeze_timeout.js
+++ b/jstests/replsets/freeze_timeout.js
@@ -3,50 +3,50 @@
* node replica set both after a freeze timeout and after a stepdown timeout expires.
*/
(function() {
- "use strict";
- load('jstests/replsets/libs/election_metrics.js');
-
- jsTestLog('1: initialize single node replica set');
- const replSet = new ReplSetTest({name: 'freeze_timeout', nodes: 1});
- const nodes = replSet.startSet();
- const config = replSet.getReplSetConfig();
- replSet.initiate(config);
- replSet.awaitReplication();
- let primary = replSet.getPrimary();
- const initialPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
-
- jsTestLog('2: step down primary');
- assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 10, force: 1}));
-
- jsTestLog('3: wait for stepped down node to become primary again');
- primary = replSet.getPrimary();
-
- // Check that both the 'called' and 'successful' fields of the 'freezeTimeout' election reason
- // counter have been incremented in serverStatus. When a stepdown timeout expires in a single
- // node replica set, an election is called for the same reason as is used when a freeze timeout
- // expires.
- let newPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
- verifyServerStatusElectionReasonCounterChange(
- initialPrimaryStatus.electionMetrics, newPrimaryStatus.electionMetrics, "freezeTimeout", 1);
-
- jsTestLog('4: step down primary again');
- assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 10, force: 1}));
-
- jsTestLog('5: freeze stepped down primary for 30 seconds');
- primary.getDB("admin").runCommand({replSetFreeze: 30});
- sleep(1000);
-
- jsTestLog('6: unfreeze stepped down primary after waiting for 1 second');
- primary.getDB("admin").runCommand({replSetFreeze: 0});
-
- jsTestLog('7: wait for unfrozen node to become primary again');
- primary = replSet.getPrimary();
-
- // Check that both the 'called' and 'successful' fields of the 'freezeTimeout' election reason
- // counter have been incremented again in serverStatus.
- newPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
- verifyServerStatusElectionReasonCounterChange(
- initialPrimaryStatus.electionMetrics, newPrimaryStatus.electionMetrics, "freezeTimeout", 2);
-
- replSet.stopSet();
+"use strict";
+load('jstests/replsets/libs/election_metrics.js');
+
+jsTestLog('1: initialize single node replica set');
+const replSet = new ReplSetTest({name: 'freeze_timeout', nodes: 1});
+const nodes = replSet.startSet();
+const config = replSet.getReplSetConfig();
+replSet.initiate(config);
+replSet.awaitReplication();
+let primary = replSet.getPrimary();
+const initialPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+
+jsTestLog('2: step down primary');
+assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 10, force: 1}));
+
+jsTestLog('3: wait for stepped down node to become primary again');
+primary = replSet.getPrimary();
+
+// Check that both the 'called' and 'successful' fields of the 'freezeTimeout' election reason
+// counter have been incremented in serverStatus. When a stepdown timeout expires in a single
+// node replica set, an election is called for the same reason as is used when a freeze timeout
+// expires.
+let newPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+verifyServerStatusElectionReasonCounterChange(
+ initialPrimaryStatus.electionMetrics, newPrimaryStatus.electionMetrics, "freezeTimeout", 1);
+
+jsTestLog('4: step down primary again');
+assert.commandWorked(primary.getDB("admin").runCommand({replSetStepDown: 10, force: 1}));
+
+jsTestLog('5: freeze stepped down primary for 30 seconds');
+primary.getDB("admin").runCommand({replSetFreeze: 30});
+sleep(1000);
+
+jsTestLog('6: unfreeze stepped down primary after waiting for 1 second');
+primary.getDB("admin").runCommand({replSetFreeze: 0});
+
+jsTestLog('7: wait for unfrozen node to become primary again');
+primary = replSet.getPrimary();
+
+// Check that both the 'called' and 'successful' fields of the 'freezeTimeout' election reason
+// counter have been incremented again in serverStatus.
+newPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+verifyServerStatusElectionReasonCounterChange(
+ initialPrimaryStatus.electionMetrics, newPrimaryStatus.electionMetrics, "freezeTimeout", 2);
+
+replSet.stopSet();
})();
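
The verifyServerStatusElectionReasonCounterChange() helper compares election counters
between two serverStatus snapshots; reading one snapshot directly looks roughly like this
(assuming 'primary' is a connection to the current primary):

    var status = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
    var counter = status.electionMetrics.freezeTimeout;
    // 'called' counts elections started for this reason; 'successful' counts wins.
    print("freezeTimeout elections called: " + counter.called +
          ", successful: " + counter.successful);
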
diff --git a/jstests/replsets/fsync_lock_read_secondaries.js b/jstests/replsets/fsync_lock_read_secondaries.js
index 1f86f420559..91e891f79d6 100644
--- a/jstests/replsets/fsync_lock_read_secondaries.js
+++ b/jstests/replsets/fsync_lock_read_secondaries.js
@@ -22,52 +22,51 @@
* witness as an increase in the count of documents stored on the secondary.
*/
(function() {
- "use strict";
- // Load utility methods for replica set tests
- load("jstests/replsets/rslib.js");
+"use strict";
+// Load utility methods for replica set tests
+load("jstests/replsets/rslib.js");
- var replTest = new ReplSetTest({name: 'testSet', nodes: 2, oplogSize: 5});
- // Start each mongod in the replica set. Returns a list of nodes
- var nodes = replTest.startSet();
- // This will wait for initiation
- replTest.initiate();
- var master = replTest.getPrimary();
+var replTest = new ReplSetTest({name: 'testSet', nodes: 2, oplogSize: 5});
+// Start each mongod in the replica set. Returns a list of nodes
+var nodes = replTest.startSet();
+// This will wait for initiation
+replTest.initiate();
+var master = replTest.getPrimary();
- var ret = master.getDB("admin").fsyncLock();
- if (!ret.ok) {
- assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
- jsTestLog("Storage Engine does not support fsyncLock, so bailing");
- return;
- }
- master.getDB("admin").fsyncUnlock();
+var ret = master.getDB("admin").fsyncLock();
+if (!ret.ok) {
+ assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
+ jsTestLog("Storage Engine does not support fsyncLock, so bailing");
+ return;
+}
+master.getDB("admin").fsyncUnlock();
- var docNum = 100;
- for (var i = 0; i < docNum; i++) {
- master.getDB("foo").bar.save({a: i});
- }
- waitForAllMembers(master.getDB("foo"));
- replTest.awaitReplication();
+var docNum = 100;
+for (var i = 0; i < docNum; i++) {
+ master.getDB("foo").bar.save({a: i});
+}
+waitForAllMembers(master.getDB("foo"));
+replTest.awaitReplication();
- // Calling getPrimary also populates '_slaves'.
- var slaves = replTest._slaves;
- slaves[0].setSlaveOk();
+// Calling getPrimary also populates '_slaves'.
+var slaves = replTest._slaves;
+slaves[0].setSlaveOk();
- assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1}));
- var docNum = 1000;
- for (var i = 0; i < docNum; i++) {
- master.getDB("foo").bar.save({a: i});
- }
- // Issue a read query on the secondary while holding the fsync lock.
- // This is what we are testing. Previously this would block. After the fix
- // this should work just fine.
- var slave0count = slaves[0].getDB("foo").bar.find().itcount();
- assert.eq(
- slave0count, 100, "Doc count in fsync lock wrong. Expected (=100), found " + slave0count);
- assert(slaves[0].getDB("admin").fsyncUnlock().ok);
+assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1}));
+var docNum = 1000;
+for (var i = 0; i < docNum; i++) {
+ master.getDB("foo").bar.save({a: i});
+}
+// Issue a read query on the secondary while holding the fsync lock.
+// This is what we are testing. Previously this would block. After the fix
+// this should work just fine.
+var slave0count = slaves[0].getDB("foo").bar.find().itcount();
+assert.eq(slave0count, 100, "Doc count in fsync lock wrong. Expected (=100), found " + slave0count);
+assert(slaves[0].getDB("admin").fsyncUnlock().ok);
- // The secondary should have equal or more documents than what it had before.
- assert.soon(function() {
- return slaves[0].getDB("foo").bar.find().itcount() > 100;
- }, "count of documents stored on the secondary did not increase");
- replTest.stopSet();
+// The secondary should end up with more documents than it had before.
+assert.soon(function() {
+ return slaves[0].getDB("foo").bar.find().itcount() > 100;
+}, "count of documents stored on the secondary did not increase");
+replTest.stopSet();
}());
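
The lock-read-unlock sequence at the heart of this test can be sketched on its own; it
assumes 'secondary' is a connection to a secondary whose storage engine supports fsyncLock:

    secondary.setSlaveOk();
    assert.commandWorked(secondary.getDB("admin").runCommand({fsync: 1, lock: 1}));
    try {
        // Reads should still succeed while the node holds the fsync lock.
        print("docs visible under lock: " + secondary.getDB("foo").bar.find().itcount());
    } finally {
        assert(secondary.getDB("admin").fsyncUnlock().ok);
    }
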
diff --git a/jstests/replsets/get_replication_info_helper.js b/jstests/replsets/get_replication_info_helper.js
index 5e2a696bb80..32b0c2af766 100644
--- a/jstests/replsets/get_replication_info_helper.js
+++ b/jstests/replsets/get_replication_info_helper.js
@@ -1,49 +1,48 @@
// Tests the output of db.getReplicationInfo() and tests db.printSlaveReplicationInfo().
(function() {
- "use strict";
- var name = "getReplicationInfo";
- var replSet = new ReplSetTest({name: name, nodes: 3, oplogSize: 50});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate();
-
- var primary = replSet.getPrimary();
- for (var i = 0; i < 100; i++) {
- primary.getDB('test').foo.insert({a: i});
- }
- replSet.awaitReplication();
-
- var replInfo = primary.getDB('admin').getReplicationInfo();
- var replInfoString = tojson(replInfo);
-
- assert.eq(50, replInfo.logSizeMB, replInfoString);
- assert.lt(0, replInfo.usedMB, replInfoString);
- assert.lte(0, replInfo.timeDiff, replInfoString);
- assert.lte(0, replInfo.timeDiffHours, replInfoString);
- // Just make sure the following fields exist since it would be hard to predict their values
- assert(replInfo.tFirst, replInfoString);
- assert(replInfo.tLast, replInfoString);
- assert(replInfo.now, replInfoString);
-
- // calling this function with and without a primary, should provide sufficient code coverage
- // to catch any JS errors
- var mongo =
- startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port);
- mongo();
- assert(rawMongoProgramOutput().match("behind the primary"));
-
- // get to a primaryless state
- for (i in replSet._slaves) {
- var secondary = replSet._slaves[i];
- secondary.getDB('admin').runCommand({replSetFreeze: 120});
- }
- assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 120, force: true}));
-
- mongo =
- startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port);
- mongo();
- assert(rawMongoProgramOutput().match("behind the freshest"));
-
- replSet.stopSet();
+"use strict";
+var name = "getReplicationInfo";
+var replSet = new ReplSetTest({name: name, nodes: 3, oplogSize: 50});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate();
+
+var primary = replSet.getPrimary();
+for (var i = 0; i < 100; i++) {
+ primary.getDB('test').foo.insert({a: i});
+}
+replSet.awaitReplication();
+
+var replInfo = primary.getDB('admin').getReplicationInfo();
+var replInfoString = tojson(replInfo);
+
+assert.eq(50, replInfo.logSizeMB, replInfoString);
+assert.lt(0, replInfo.usedMB, replInfoString);
+assert.lte(0, replInfo.timeDiff, replInfoString);
+assert.lte(0, replInfo.timeDiffHours, replInfoString);
+// Just make sure the following fields exist since it would be hard to predict their values
+assert(replInfo.tFirst, replInfoString);
+assert(replInfo.tLast, replInfoString);
+assert(replInfo.now, replInfoString);
+
+// Calling this function with and without a primary should provide sufficient code coverage
+// to catch any JS errors.
+var mongo =
+ startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port);
+mongo();
+assert(rawMongoProgramOutput().match("behind the primary"));
+
+// get to a primaryless state
+for (i in replSet._slaves) {
+ var secondary = replSet._slaves[i];
+ secondary.getDB('admin').runCommand({replSetFreeze: 120});
+}
+assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 120, force: true}));
+
+mongo = startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port);
+mongo();
+assert(rawMongoProgramOutput().match("behind the freshest"));
+
+replSet.stopSet();
})();
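
The fields asserted on above come straight out of db.getReplicationInfo(); a quick way to
eyeball them on any member (values are deployment-dependent; only the field names are taken
from the test):

    var info = db.getSiblingDB("admin").getReplicationInfo();
    printjson({
        logSizeMB: info.logSizeMB,  // configured oplog size
        usedMB: info.usedMB,        // oplog space currently in use
        timeDiff: info.timeDiff,    // seconds spanned by the oplog
        tFirst: info.tFirst,        // time of the first oplog entry
        tLast: info.tLast           // time of the last oplog entry
    });
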
diff --git a/jstests/replsets/get_status.js b/jstests/replsets/get_status.js
index 31a49dc1300..fab6a2035b4 100644
--- a/jstests/replsets/get_status.js
+++ b/jstests/replsets/get_status.js
@@ -4,25 +4,25 @@
*/
(function() {
- "use strict";
- var name = "getstatus";
- var numNodes = 4;
- var replTest = new ReplSetTest({name: name, nodes: numNodes});
- var nodes = replTest.startSet();
+"use strict";
+var name = "getstatus";
+var numNodes = 4;
+var replTest = new ReplSetTest({name: name, nodes: numNodes});
+var nodes = replTest.startSet();
- var config = replTest.getReplSetConfig();
- config.members[numNodes - 1].arbiterOnly = true;
- // An invalid time to get status
- var statusBeforeInitCode = 94;
- assert.commandFailedWithCode(nodes[0].getDB("admin").runCommand({replSetGetStatus: 1}),
- statusBeforeInitCode,
- "replSetGetStatus should fail before initializing.");
- replTest.initiate(config);
- replTest.awaitSecondaryNodes();
+var config = replTest.getReplSetConfig();
+config.members[numNodes - 1].arbiterOnly = true;
+// An invalid time to get status
+var statusBeforeInitCode = 94;
+assert.commandFailedWithCode(nodes[0].getDB("admin").runCommand({replSetGetStatus: 1}),
+ statusBeforeInitCode,
+ "replSetGetStatus should fail before initializing.");
+replTest.initiate(config);
+replTest.awaitSecondaryNodes();
- // A valid status
- var primary = replTest.getPrimary();
- assert.commandWorked(primary.getDB("admin").runCommand({replSetGetStatus: 1}));
+// A valid status
+var primary = replTest.getPrimary();
+assert.commandWorked(primary.getDB("admin").runCommand({replSetGetStatus: 1}));
- replTest.stopSet();
+replTest.stopSet();
}());
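
In isolation, the two states the test checks look like this sketch ('conn' is an assumed
connection; error code 94, NotYetInitialized, is the value the test names
statusBeforeInitCode):

    // Before replSetInitiate, replSetGetStatus is rejected outright.
    assert.commandFailedWithCode(conn.adminCommand({replSetGetStatus: 1}), 94);
    // After initiate, it reports per-member state.
    var status = assert.commandWorked(conn.adminCommand({replSetGetStatus: 1}));
    printjson(status.members.map(function(m) {
        return m.stateStr;
    }));
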
diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js
index 6dfbe3047fc..cb63de5cf6d 100644
--- a/jstests/replsets/groupAndMapReduce.js
+++ b/jstests/replsets/groupAndMapReduce.js
@@ -1,7 +1,6 @@
load("jstests/replsets/rslib.js");
doTest = function(signal) {
-
// Test basic replica set functionality.
// -- Replication
// -- Failover
@@ -83,7 +82,6 @@ doTest = function(signal) {
} catch (e) {
print("Received exception: " + e);
}
-
});
// Shut down the set and finish the test.
diff --git a/jstests/replsets/hang_before_releasing_transaction_oplog_hole.js b/jstests/replsets/hang_before_releasing_transaction_oplog_hole.js
index af74be61d60..7156721fbc3 100644
--- a/jstests/replsets/hang_before_releasing_transaction_oplog_hole.js
+++ b/jstests/replsets/hang_before_releasing_transaction_oplog_hole.js
@@ -7,63 +7,63 @@
*/
(function() {
- 'use strict';
- load("jstests/libs/check_log.js");
+'use strict';
+load("jstests/libs/check_log.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const node = rst.getPrimary();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const node = rst.getPrimary();
- const name = 'hang_before_releasing_transaction_oplog_hole';
- const dbName = 'test';
- const collName = name;
- const testDB = node.getDB(dbName);
- const coll = testDB[collName];
+const name = 'hang_before_releasing_transaction_oplog_hole';
+const dbName = 'test';
+const collName = name;
+const testDB = node.getDB(dbName);
+const coll = testDB[collName];
- // Create collection before running the transaction.
- assert.commandWorked(coll.insert({a: 1}));
+// Create collection before running the transaction.
+assert.commandWorked(coll.insert({a: 1}));
- // Run a transaction in a parallel shell. The transaction will be configured to hang on commit.
- // Rather than setting a timeout on commit and forfeiting our ability to check commit for
- // success, we use a separate thread to disable the failpoint and allow the server to finish
- // committing successfully.
- function transactionFn() {
- load('jstests/core/txns/libs/prepare_helpers.js');
+// Run a transaction in a parallel shell. The transaction will be configured to hang on commit.
+// Rather than setting a timeout on commit and forfeiting our ability to check commit for
+// success, we use a separate thread to disable the failpoint and allow the server to finish
+// committing successfully.
+function transactionFn() {
+ load('jstests/core/txns/libs/prepare_helpers.js');
- const name = 'hang_before_releasing_transaction_oplog_hole';
- const dbName = 'test';
- const collName = name;
- const session = db.getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
+ const name = 'hang_before_releasing_transaction_oplog_hole';
+ const dbName = 'test';
+ const collName = name;
+ const session = db.getMongo().startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase(dbName);
- session.startTransaction({readConcern: {level: 'snapshot'}});
- sessionDB[collName].update({}, {a: 2});
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ session.startTransaction({readConcern: {level: 'snapshot'}});
+ sessionDB[collName].update({}, {a: 2});
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- // Hang before releasing the 'commitTransaction' oplog entry hole.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: 'hangBeforeReleasingTransactionOplogHole', mode: 'alwaysOn'}));
+ // Hang before releasing the 'commitTransaction' oplog entry hole.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: 'hangBeforeReleasingTransactionOplogHole', mode: 'alwaysOn'}));
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- }
- const joinTransaction = startParallelShell(transactionFn, rst.ports[0]);
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+}
+const joinTransaction = startParallelShell(transactionFn, rst.ports[0]);
- jsTestLog("Waiting to hang with the oplog hole held open.");
- checkLog.contains(node, "hangBeforeReleasingTransactionOplogHole fail point enabled");
+jsTestLog("Waiting to hang with the oplog hole held open.");
+checkLog.contains(node, "hangBeforeReleasingTransactionOplogHole fail point enabled");
- jsTestLog("Waiting for 'commitTransaction' to advance lastApplied.");
- sleep(5 * 1000);
- assert.commandWorked(testDB.adminCommand(
- {configureFailPoint: 'hangBeforeReleasingTransactionOplogHole', mode: 'off'}));
+jsTestLog("Waiting for 'commitTransaction' to advance lastApplied.");
+sleep(5 * 1000);
+assert.commandWorked(testDB.adminCommand(
+ {configureFailPoint: 'hangBeforeReleasingTransactionOplogHole', mode: 'off'}));
- jsTestLog("Joining the transaction.");
- joinTransaction();
+jsTestLog("Joining the transaction.");
+joinTransaction();
- jsTestLog("Dropping another collection.");
- // A w:majority drop on a non-existent collection will not do a write, but will still wait for
- // write concern. We double check that that still succeeds.
- testDB["otherColl"].drop({writeConcern: {w: "majority"}});
+jsTestLog("Dropping another collection.");
+// A w:majority drop on a non-existent collection will not do a write, but will still wait for
+// write concern. We double-check that this still succeeds.
+testDB["otherColl"].drop({writeConcern: {w: "majority"}});
- rst.stopSet();
+rst.stopSet();
})();
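
The enable/wait/disable failpoint lifecycle used above is a common jstests idiom; stripped
to its essentials (and assuming jstests/libs/check_log.js is loaded for checkLog):

    assert.commandWorked(node.adminCommand(
        {configureFailPoint: "hangBeforeReleasingTransactionOplogHole", mode: "alwaysOn"}));
    checkLog.contains(node, "hangBeforeReleasingTransactionOplogHole fail point enabled");
    // ...start the operation that should hang from a parallel shell...
    assert.commandWorked(node.adminCommand(
        {configureFailPoint: "hangBeforeReleasingTransactionOplogHole", mode: "off"}));
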
diff --git a/jstests/replsets/id_index_replication.js b/jstests/replsets/id_index_replication.js
index 764619013ea..bd693104104 100644
--- a/jstests/replsets/id_index_replication.js
+++ b/jstests/replsets/id_index_replication.js
@@ -3,77 +3,75 @@
* created on the secondary when the index spec is not included in the oplog.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- var replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- rst.initiate(replSetConfig);
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+var replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+rst.initiate(replSetConfig);
- var primaryDB = rst.getPrimary().getDB("test");
- var oplogColl = rst.getPrimary().getDB("local").oplog.rs;
- var secondaryDB = rst.getSecondary().getDB("test");
+var primaryDB = rst.getPrimary().getDB("test");
+var oplogColl = rst.getPrimary().getDB("local").oplog.rs;
+var secondaryDB = rst.getSecondary().getDB("test");
- function testOplogEntryIdIndexSpec(collectionName, idIndexSpec) {
- var oplogEntry = oplogColl.findOne({op: "c", "o.create": collectionName});
- assert.neq(null, oplogEntry);
- if (idIndexSpec === null) {
- assert(!oplogEntry.o.hasOwnProperty("idIndex"), tojson(oplogEntry));
- } else {
- assert.eq(0, bsonWoCompare(idIndexSpec, oplogEntry.o.idIndex), tojson(oplogEntry));
- }
+function testOplogEntryIdIndexSpec(collectionName, idIndexSpec) {
+ var oplogEntry = oplogColl.findOne({op: "c", "o.create": collectionName});
+ assert.neq(null, oplogEntry);
+ if (idIndexSpec === null) {
+ assert(!oplogEntry.o.hasOwnProperty("idIndex"), tojson(oplogEntry));
+ } else {
+ assert.eq(0, bsonWoCompare(idIndexSpec, oplogEntry.o.idIndex), tojson(oplogEntry));
}
+}
- assert.commandWorked(primaryDB.createCollection("without_version"));
- var allIndexes = primaryDB.without_version.getIndexes();
- var spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(2, spec.v, "Expected primary to build a v=2 _id index: " + tojson(spec));
- testOplogEntryIdIndexSpec("without_version", spec);
+assert.commandWorked(primaryDB.createCollection("without_version"));
+var allIndexes = primaryDB.without_version.getIndexes();
+var spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(2, spec.v, "Expected primary to build a v=2 _id index: " + tojson(spec));
+testOplogEntryIdIndexSpec("without_version", spec);
- assert.commandWorked(
- primaryDB.createCollection("version_v2", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}}));
- allIndexes = primaryDB.version_v2.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(2, spec.v, "Expected primary to build a v=2 _id index: " + tojson(spec));
- testOplogEntryIdIndexSpec("version_v2", spec);
+assert.commandWorked(
+ primaryDB.createCollection("version_v2", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}}));
+allIndexes = primaryDB.version_v2.getIndexes();
+spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(2, spec.v, "Expected primary to build a v=2 _id index: " + tojson(spec));
+testOplogEntryIdIndexSpec("version_v2", spec);
- assert.commandWorked(
- primaryDB.createCollection("version_v1", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
- allIndexes = primaryDB.version_v1.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(1, spec.v, "Expected primary to build a v=1 _id index: " + tojson(spec));
- testOplogEntryIdIndexSpec("version_v1", null);
+assert.commandWorked(
+ primaryDB.createCollection("version_v1", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
+allIndexes = primaryDB.version_v1.getIndexes();
+spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(1, spec.v, "Expected primary to build a v=1 _id index: " + tojson(spec));
+testOplogEntryIdIndexSpec("version_v1", null);
- rst.awaitReplication();
+rst.awaitReplication();
- // Verify that the secondary built _id indexes with the same version as on the primary.
+// Verify that the secondary built _id indexes with the same version as on the primary.
- allIndexes = secondaryDB.without_version.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(
- 2,
- spec.v,
- "Expected secondary to build a v=2 _id index when explicitly requested: " + tojson(spec));
+allIndexes = secondaryDB.without_version.getIndexes();
+spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(2,
+ spec.v,
+ "Expected secondary to build a v=2 _id index when explicitly requested: " + tojson(spec));
- allIndexes = secondaryDB.version_v2.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(
- 2,
- spec.v,
- "Expected secondary to build a v=2 _id index when explicitly requested: " + tojson(spec));
+allIndexes = secondaryDB.version_v2.getIndexes();
+spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(2,
+ spec.v,
+ "Expected secondary to build a v=2 _id index when explicitly requested: " + tojson(spec));
- allIndexes = secondaryDB.version_v1.getIndexes();
- spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
- assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
- assert.eq(1, spec.v, "Expected secondary to implicitly build a v=1 _id index: " + tojson(spec));
+allIndexes = secondaryDB.version_v1.getIndexes();
+spec = GetIndexHelpers.findByKeyPattern(allIndexes, {_id: 1});
+assert.neq(null, spec, "_id index not found: " + tojson(allIndexes));
+assert.eq(1, spec.v, "Expected secondary to implicitly build a v=1 _id index: " + tojson(spec));
- rst.stopSet();
+rst.stopSet();
})();
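
Requesting a specific _id index version at creation time, the option whose replication this
test verifies, can be sketched as follows (collection name is illustrative):

    assert.commandWorked(db.createCollection(
        "example", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}}));
    var spec = db.example.getIndexes().filter(function(ix) {
        return ix.name === "_id_";
    })[0];
    assert.eq(1, spec.v);  // v:1 is the legacy format; v:2 is the modern default
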
diff --git a/jstests/replsets/initial_sync2.js b/jstests/replsets/initial_sync2.js
index b08f9bd1a71..27d3f0e0e66 100644
--- a/jstests/replsets/initial_sync2.js
+++ b/jstests/replsets/initial_sync2.js
@@ -19,7 +19,6 @@ load("jstests/replsets/rslib.js");
var basename = "jstests_initsync2";
var doTest = function() {
-
jsTest.log("1. Bring up set");
var replTest = new ReplSetTest({name: basename, nodes: [{rsConfig: {priority: 2}}, {}]});
var conns = replTest.startSet();
diff --git a/jstests/replsets/initial_sync_applier_error.js b/jstests/replsets/initial_sync_applier_error.js
index 36182e58c39..2bd65f51e12 100644
--- a/jstests/replsets/initial_sync_applier_error.js
+++ b/jstests/replsets/initial_sync_applier_error.js
@@ -10,47 +10,47 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/libs/check_log.js");
- var name = 'initial_sync_applier_error';
- var replSet = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {arbiterOnly: true}}],
- });
+var name = 'initial_sync_applier_error';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {arbiterOnly: true}}],
+});
- replSet.startSet();
- replSet.initiate();
- var primary = replSet.getPrimary();
+replSet.startSet();
+replSet.initiate();
+var primary = replSet.getPrimary();
- var coll = primary.getDB('test').getCollection(name);
- assert.writeOK(coll.insert({_id: 0, content: "hi"}));
+var coll = primary.getDB('test').getCollection(name);
+assert.writeOK(coll.insert({_id: 0, content: "hi"}));
- // Add a secondary node but make it hang after retrieving the last op on the source
- // but before copying databases.
- var secondary = replSet.add({setParameter: "numInitialSyncAttempts=2"});
- secondary.setSlaveOk();
+// Add a secondary node but make it hang after retrieving the last op on the source
+// but before copying databases.
+var secondary = replSet.add({setParameter: "numInitialSyncAttempts=2"});
+secondary.setSlaveOk();
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
- replSet.reInitiate();
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
+replSet.reInitiate();
- // Wait for fail point message to be logged.
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+// Wait for fail point message to be logged.
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
- var newCollName = name + '_2';
- assert.commandWorked(coll.renameCollection(newCollName, true));
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+var newCollName = name + '_2';
+assert.commandWorked(coll.renameCollection(newCollName, true));
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
- checkLog.contains(secondary, 'initial sync done');
+checkLog.contains(secondary, 'initial sync done');
- replSet.awaitReplication();
- replSet.awaitSecondaryNodes();
+replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
- assert.eq(0, secondary.getDB('test').getCollection(name).count());
- assert.eq(1, secondary.getDB('test').getCollection(newCollName).count());
- assert.eq("hi", secondary.getDB('test').getCollection(newCollName).findOne({_id: 0}).content);
- replSet.stopSet();
+assert.eq(0, secondary.getDB('test').getCollection(name).count());
+assert.eq(1, secondary.getDB('test').getCollection(newCollName).count());
+assert.eq("hi", secondary.getDB('test').getCollection(newCollName).findOne({_id: 0}).content);
+replSet.stopSet();
})();
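
The rename that races with initial sync above relies on the shell helper's dropTarget flag;
in isolation (names illustrative):

    // The second argument, dropTarget=true, drops any existing target collection.
    assert.commandWorked(db.getCollection("source").renameCollection("target", true));
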
diff --git a/jstests/replsets/initial_sync_capped_index.js b/jstests/replsets/initial_sync_capped_index.js
index 0aa3c648499..a7c1a2a3de4 100644
--- a/jstests/replsets/initial_sync_capped_index.js
+++ b/jstests/replsets/initial_sync_capped_index.js
@@ -24,97 +24,97 @@
* This is a regression test for SERVER-29197.
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- /**
- * Overflow a capped collection 'coll' by continuously inserting a given document,
- * 'docToInsert'.
- */
- function overflowCappedColl(coll, docToInsert) {
- // Insert one document and save its _id.
- assert.writeOK(coll.insert(docToInsert));
- var origFirstDocId = coll.findOne()["_id"];
-
- // Detect overflow by seeing if the original first doc of the collection is still present.
- while (coll.findOne({_id: origFirstDocId})) {
- assert.commandWorked(coll.insert(docToInsert));
- }
- }
+"use strict";
- // Set up replica set.
- var testName = "initial_sync_capped_index";
- var dbName = testName;
- var replTest = new ReplSetTest({name: testName, nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- var primary = replTest.getPrimary();
- var primaryDB = primary.getDB(dbName);
- var cappedCollName = "capped_coll";
- var primaryCappedColl = primaryDB[cappedCollName];
-
- // Create a capped collection of the minimum allowed size.
- var cappedCollSize = 4096;
-
- jsTestLog("Creating capped collection of size " + cappedCollSize + " bytes.");
- assert.commandWorked(
- primaryDB.createCollection(cappedCollName, {capped: true, size: cappedCollSize}));
-
- // Overflow the capped collection.
- jsTestLog("Overflowing the capped collection.");
-
- var docSize = cappedCollSize / 8;
- var largeDoc = {a: new Array(docSize).join("*")};
- overflowCappedColl(primaryCappedColl, largeDoc);
-
- // Check that there are more than two documents in the collection. This will ensure the
- // secondary's collection cloner will send a getMore.
- assert.gt(primaryCappedColl.find().itcount(), 2);
-
- // Add a SECONDARY node. It should use batchSize=2 for its initial sync queries.
- jsTestLog("Adding secondary node.");
- replTest.add({setParameter: "collectionClonerBatchSize=2"});
-
- var secondary = replTest.getSecondary();
- var collectionClonerFailPoint = "initialSyncHangCollectionClonerAfterHandlingBatchResponse";
-
- // Make the collection cloner pause after its initial 'find' response on the capped collection.
- var nss = dbName + "." + cappedCollName;
- jsTestLog("Enabling collection cloner fail point for " + nss);
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: collectionClonerFailPoint, mode: 'alwaysOn', data: {nss: nss}}));
-
- // Let the SECONDARY begin initial sync.
- jsTestLog("Re-initiating replica set with new secondary.");
- replTest.reInitiate();
-
- jsTestLog("Waiting for the initial 'find' response of capped collection cloner to complete.");
- checkLog.contains(
- secondary,
- "initialSyncHangCollectionClonerAfterHandlingBatchResponse fail point enabled for " + nss);
-
- // Append documents to the capped collection so that the SECONDARY will clone these
- // additional documents.
- var docsToAppend = 2;
- for (var i = 0; i < docsToAppend; i++) {
- assert.writeOK(primaryDB[cappedCollName].insert(largeDoc));
- }
+load("jstests/libs/check_log.js");
- // Let the 'getMore' requests for the capped collection clone continue.
- jsTestLog("Disabling collection cloner fail point for " + nss);
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: collectionClonerFailPoint, mode: 'off', data: {nss: nss}}));
-
- // Wait until initial sync completes.
- replTest.awaitReplication();
-
- // Make sure the indexes created during initial sync are valid.
- var secondaryCappedColl = secondary.getDB(dbName)[cappedCollName];
- var validate_result = secondaryCappedColl.validate(true);
- var failMsg =
- "Index validation of '" + secondaryCappedColl.name + "' failed: " + tojson(validate_result);
- assert(validate_result.valid, failMsg);
- replTest.stopSet();
+/**
+ * Overflow a capped collection 'coll' by continuously inserting a given document,
+ * 'docToInsert'.
+ */
+function overflowCappedColl(coll, docToInsert) {
+ // Insert one document and save its _id.
+ assert.writeOK(coll.insert(docToInsert));
+ var origFirstDocId = coll.findOne()["_id"];
+
+ // Detect overflow by seeing if the original first doc of the collection is still present.
+ while (coll.findOne({_id: origFirstDocId})) {
+ assert.commandWorked(coll.insert(docToInsert));
+ }
+}
+
+// Set up replica set.
+var testName = "initial_sync_capped_index";
+var dbName = testName;
+var replTest = new ReplSetTest({name: testName, nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+var primary = replTest.getPrimary();
+var primaryDB = primary.getDB(dbName);
+var cappedCollName = "capped_coll";
+var primaryCappedColl = primaryDB[cappedCollName];
+
+// Create a capped collection of the minimum allowed size.
+var cappedCollSize = 4096;
+
+jsTestLog("Creating capped collection of size " + cappedCollSize + " bytes.");
+assert.commandWorked(
+ primaryDB.createCollection(cappedCollName, {capped: true, size: cappedCollSize}));
+
+// Overflow the capped collection.
+jsTestLog("Overflowing the capped collection.");
+
+var docSize = cappedCollSize / 8;
+var largeDoc = {a: new Array(docSize).join("*")};
+overflowCappedColl(primaryCappedColl, largeDoc);
+
+// Check that there are more than two documents in the collection. This will ensure the
+// secondary's collection cloner will send a getMore.
+assert.gt(primaryCappedColl.find().itcount(), 2);
+
+// Add a SECONDARY node. It should use batchSize=2 for its initial sync queries.
+jsTestLog("Adding secondary node.");
+replTest.add({setParameter: "collectionClonerBatchSize=2"});
+
+var secondary = replTest.getSecondary();
+var collectionClonerFailPoint = "initialSyncHangCollectionClonerAfterHandlingBatchResponse";
+
+// Make the collection cloner pause after its initial 'find' response on the capped collection.
+var nss = dbName + "." + cappedCollName;
+jsTestLog("Enabling collection cloner fail point for " + nss);
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: collectionClonerFailPoint, mode: 'alwaysOn', data: {nss: nss}}));
+
+// Let the SECONDARY begin initial sync.
+jsTestLog("Re-initiating replica set with new secondary.");
+replTest.reInitiate();
+
+jsTestLog("Waiting for the initial 'find' response of capped collection cloner to complete.");
+checkLog.contains(
+ secondary,
+ "initialSyncHangCollectionClonerAfterHandlingBatchResponse fail point enabled for " + nss);
+
+// Append documents to the capped collection so that the SECONDARY will clone these
+// additional documents.
+var docsToAppend = 2;
+for (var i = 0; i < docsToAppend; i++) {
+ assert.writeOK(primaryDB[cappedCollName].insert(largeDoc));
+}
+
+// Let the 'getMore' requests for the capped collection clone continue.
+jsTestLog("Disabling collection cloner fail point for " + nss);
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: collectionClonerFailPoint, mode: 'off', data: {nss: nss}}));
+
+// Wait until initial sync completes.
+replTest.awaitReplication();
+
+// Make sure the indexes created during initial sync are valid.
+var secondaryCappedColl = secondary.getDB(dbName)[cappedCollName];
+var validate_result = secondaryCappedColl.validate(true);
+var failMsg =
+ "Index validation of '" + secondaryCappedColl.name + "' failed: " + tojson(validate_result);
+assert(validate_result.valid, failMsg);
+replTest.stopSet();
})();
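
The overflow-detection idiom inside overflowCappedColl() is worth seeing on its own; a
minimal sketch, assuming the shell's default 'db' (collection name and sizes are
illustrative):

    assert.commandWorked(db.createCollection("capped_example", {capped: true, size: 4096}));
    var coll = db.capped_example;
    assert.writeOK(coll.insert({pad: new Array(512).join("*")}));
    var firstId = coll.findOne()._id;
    // Keep inserting until the original first document has been evicted.
    while (coll.findOne({_id: firstId})) {
        assert.commandWorked(coll.insert({pad: new Array(512).join("*")}));
    }
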
diff --git a/jstests/replsets/initial_sync_cloner_dups.js b/jstests/replsets/initial_sync_cloner_dups.js
index 23b1b989400..7132d9f2087 100644
--- a/jstests/replsets/initial_sync_cloner_dups.js
+++ b/jstests/replsets/initial_sync_cloner_dups.js
@@ -7,126 +7,126 @@
* verify collection and both indexes on the secondary have the right number of docs
*/
(function(doNotRun) {
- "use strict";
+"use strict";
- if (doNotRun) {
- return;
- }
+if (doNotRun) {
+ return;
+}
- load('jstests/libs/parallelTester.js');
+load('jstests/libs/parallelTester.js');
- Random.setRandomSeed();
+Random.setRandomSeed();
- // used to parse RAM log file
- var contains = function(logLines, func) {
- var i = logLines.length;
- while (i--) {
- printjson(logLines[i]);
- if (func(logLines[i])) {
- return true;
- }
+// used to parse RAM log file
+var contains = function(logLines, func) {
+ var i = logLines.length;
+ while (i--) {
+ printjson(logLines[i]);
+ if (func(logLines[i])) {
+ return true;
}
- return false;
- };
-
- var replTest = new ReplSetTest({name: 'cloner', nodes: 3, oplogSize: 150 /*~1.5x data size*/});
- replTest.startSet();
- var conf = replTest.getReplSetConfig();
- conf.settings = {};
- conf.settings.chainingAllowed = false;
- replTest.initiate(conf);
- replTest.awaitSecondaryNodes();
- var primary = replTest.getPrimary();
- var coll = primary.getDB('test').cloner;
- coll.drop();
- coll.createIndex({k: 1});
-
- // These need to be big enough to force initial-sync to use many batches
- var numDocs = 100 * 1000;
- var bigStr = Array(1001).toString();
- var batch = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- batch.insert({_id: i, bigStr: bigStr});
}
- batch.execute();
-
- replTest.awaitReplication();
-
- jsTestLog("Start remove/insert on primary");
- var insertAndRemove = function(host) {
- jsTestLog("starting bg writes on " + host);
- var m = new Mongo(host);
- var db = m.getDB('test');
- var coll = db.cloner;
- var numDocs = coll.count();
- for (var i = 0; !db.stop.findOne(); i++) {
- var id = Random.randInt(numDocs);
- coll.remove({_id: id});
- coll.insert({_id: id});
-
- var id = i % numDocs;
- // print(id);
- coll.remove({_id: id});
- coll.insert({_id: id});
-
- // Try to throttle this thread to prevent overloading slow machines.
- sleep(1);
- }
-
- jsTestLog("finished bg writes on " + host);
- };
- var worker = new ScopedThread(insertAndRemove, primary.host);
- worker.start();
-
- jsTestLog("add a new secondary");
- var secondary = replTest.add({});
- replTest.reInitiate();
- secondary.setSlaveOk();
- // Wait for the secondary to get ReplSetInitiate command.
- replTest.waitForState(
- secondary,
- [ReplSetTest.State.STARTUP_2, ReplSetTest.State.RECOVERING, ReplSetTest.State.SECONDARY]);
-
- // This fail point will cause the first intial sync to fail, and leave an op in the buffer to
- // verify the fix from SERVER-17807
- print("=================== failpoint enabled ==============");
- printjson(assert.commandWorked(secondary.getDB("admin").adminCommand(
- {configureFailPoint: 'failInitSyncWithBufferedEntriesLeft', mode: {times: 1}})));
- printjson(assert.commandWorked(secondary.getDB("admin").adminCommand({resync: true})));
-
- // NOTE: This is here to prevent false negatives, but it is racy and dependent on magic numbers.
- // Removed the assertion because it was too flaky. Printing a warning instead (dan)
- jsTestLog("making sure we dropped some dups");
- var res = secondary.adminCommand({getLog: "global"});
- var droppedDups = (contains(res.log, function(v) {
- return v.indexOf("index build dropped" /* NNN dups*/) != -1;
- }));
- if (!droppedDups) {
- jsTestLog(
- "Warning: Test did not trigger duplicate documents, this run will be a false negative");
+ return false;
+};
+
+var replTest = new ReplSetTest({name: 'cloner', nodes: 3, oplogSize: 150 /*~1.5x data size*/});
+replTest.startSet();
+var conf = replTest.getReplSetConfig();
+conf.settings = {};
+conf.settings.chainingAllowed = false;
+replTest.initiate(conf);
+replTest.awaitSecondaryNodes();
+var primary = replTest.getPrimary();
+var coll = primary.getDB('test').cloner;
+coll.drop();
+coll.createIndex({k: 1});
+
+// These need to be big enough to force initial-sync to use many batches
+var numDocs = 100 * 1000;
+var bigStr = Array(1001).toString();
+var batch = coll.initializeUnorderedBulkOp();
+for (var i = 0; i < numDocs; i++) {
+ batch.insert({_id: i, bigStr: bigStr});
+}
+batch.execute();
+
+replTest.awaitReplication();
+
+jsTestLog("Start remove/insert on primary");
+var insertAndRemove = function(host) {
+ jsTestLog("starting bg writes on " + host);
+ var m = new Mongo(host);
+ var db = m.getDB('test');
+ var coll = db.cloner;
+ var numDocs = coll.count();
+ for (var i = 0; !db.stop.findOne(); i++) {
+ var id = Random.randInt(numDocs);
+ coll.remove({_id: id});
+ coll.insert({_id: id});
+
+ var id = i % numDocs;
+ // print(id);
+ coll.remove({_id: id});
+ coll.insert({_id: id});
+
+ // Try to throttle this thread to prevent overloading slow machines.
+ sleep(1);
}
- jsTestLog("stopping writes and waiting for replica set to coalesce");
- primary.getDB('test').stop.insert({});
- worker.join();
- // make sure all secondaries are caught up, after init sync
- reconnect(secondary.getDB("test"));
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
-
- jsTestLog("check that secondary has correct counts");
- var secondaryColl = secondary.getDB('test').getCollection('cloner');
- var index = secondaryColl.find({}, {_id: 1}).hint({_id: 1}).itcount();
- var secondary_index = secondaryColl.find({}, {_id: 1}).hint({k: 1}).itcount();
- var table = secondaryColl.find({}, {_id: 1}).hint({$natural: 1}).itcount();
- if (index != table || index != secondary_index) {
- printjson({
- name: coll,
- _id_index_count: index,
- secondary_index_count: secondary_index,
- table_count: table
- });
- }
- assert.eq(index, table);
- assert.eq(table, secondary_index);
+ jsTestLog("finished bg writes on " + host);
+};
+var worker = new ScopedThread(insertAndRemove, primary.host);
+worker.start();
+
+jsTestLog("add a new secondary");
+var secondary = replTest.add({});
+replTest.reInitiate();
+secondary.setSlaveOk();
+// Wait for the secondary to get ReplSetInitiate command.
+replTest.waitForState(
+ secondary,
+ [ReplSetTest.State.STARTUP_2, ReplSetTest.State.RECOVERING, ReplSetTest.State.SECONDARY]);
+
+// This fail point will cause the first initial sync to fail, and leave an op in the buffer to
+// verify the fix from SERVER-17807.
+print("=================== failpoint enabled ==============");
+printjson(assert.commandWorked(secondary.getDB("admin").adminCommand(
+ {configureFailPoint: 'failInitSyncWithBufferedEntriesLeft', mode: {times: 1}})));
+printjson(assert.commandWorked(secondary.getDB("admin").adminCommand({resync: true})));
+
+// NOTE: This check is here to prevent false negatives, but it is racy and depends on magic
+// numbers. The assertion was removed because it was too flaky; we print a warning instead.
+jsTestLog("making sure we dropped some dups");
+var res = secondary.adminCommand({getLog: "global"});
+var droppedDups = (contains(res.log, function(v) {
+ return v.indexOf("index build dropped" /* NNN dups*/) != -1;
+}));
+if (!droppedDups) {
+    jsTestLog(
+        "Warning: Test did not trigger duplicate documents; this run will be a false negative");
+}
+
+jsTestLog("stopping writes and waiting for replica set to coalesce");
+primary.getDB('test').stop.insert({});
+worker.join();
+// make sure all secondaries are caught up, after init sync
+reconnect(secondary.getDB("test"));
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+
+jsTestLog("check that secondary has correct counts");
+var secondaryColl = secondary.getDB('test').getCollection('cloner');
+var index = secondaryColl.find({}, {_id: 1}).hint({_id: 1}).itcount();
+var secondary_index = secondaryColl.find({}, {_id: 1}).hint({k: 1}).itcount();
+var table = secondaryColl.find({}, {_id: 1}).hint({$natural: 1}).itcount();
+if (index != table || index != secondary_index) {
+ printjson({
+ name: coll,
+ _id_index_count: index,
+ secondary_index_count: secondary_index,
+ table_count: table
+ });
+}
+assert.eq(index, table);
+assert.eq(table, secondary_index);
})(true /* Disabled until SERVER-23476 re-enables the resync command */);
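
The background-writer pattern above (a ScopedThread that loops until a sentinel document
appears) generalizes to a small sketch; 'primary' is an assumed connection and the
collection names are illustrative:

    load("jstests/libs/parallelTester.js");
    var writer = new ScopedThread(function(host) {
        var conn = new Mongo(host);
        var testDB = conn.getDB("test");
        while (!testDB.stop.findOne()) {
            testDB.cloner.insert({x: 1});
            sleep(1);  // throttle to avoid overloading slow machines
        }
    }, primary.host);
    writer.start();
    // ...exercise the behavior under concurrent writes...
    primary.getDB("test").stop.insert({});  // signal the writer to exit
    writer.join();
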
diff --git a/jstests/replsets/initial_sync_commit_prepared_transaction.js b/jstests/replsets/initial_sync_commit_prepared_transaction.js
index 81590fc0bc2..80198e82673 100644
--- a/jstests/replsets/initial_sync_commit_prepared_transaction.js
+++ b/jstests/replsets/initial_sync_commit_prepared_transaction.js
@@ -7,107 +7,109 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
-
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
-
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- const dbName = "test";
- const collName = "initial_sync_commit_prepared_transaction";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testColl.insert({_id: 1}));
-
- jsTestLog("Preparing a transaction that will be the oldest active transaction");
-
- // Prepare a transaction so that there is an active transaction with an oplog entry. The prepare
- // timestamp will become the beginFetchingTimestamp during initial sync.
- const session1 = primary.startSession({causalConsistency: false});
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 2}));
- let prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
-
- // Do another operation so that the beginFetchingTimestamp will be different from the
- // beginApplyingTimestamp.
- assert.commandWorked(testColl.insert({_id: 3}));
-
- jsTestLog("Restarting the secondary");
-
- // Restart the secondary with startClean set to true so that it goes through initial sync. Also
- // restart the node with a failpoint turned on that will pause initial sync after the secondary
- // has copied {_id: 1} and {_id: 3}. This way we can try to commit the prepared transaction
- // while initial sync is paused and know that its operations won't be copied during collection
- // cloning. Instead, the commitTransaction oplog entry must be applied during oplog application.
- replTest.stop(secondary,
- // signal
- undefined,
- // Validation would encounter a prepare conflict on the open transaction.
- {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangDuringCollectionClone': tojson(
- {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 2}}),
- 'numInitialSyncAttempts': 1
- }
- },
- true /* wait */);
-
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
-
- jsTestLog("Running operations while collection cloning is paused");
-
- // Commit a transaction on the sync source while collection cloning is paused so that we know
- // they must be applied during the oplog application stage of initial sync.
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
-
- jsTestLog("Resuming initial sync");
-
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
-
- // Wait for the secondary to complete initial sync.
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
-
- jsTestLog("Initial sync completed");
-
- // Make sure the transaction committed properly and is reflected after the initial sync.
- let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
- assert.docEq(res, {_id: 2}, res);
-
- // Step up the secondary after initial sync is done and make sure we can successfully run
- // another transaction.
- replTest.stepUp(secondary);
- replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
- let newPrimary = replTest.getPrimary();
- const session2 = newPrimary.startSession({causalConsistency: false});
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 4}));
- let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
- res = newPrimary.getDB(dbName).getCollection(collName).findOne({_id: 4});
- assert.docEq(res, {_id: 4}, res);
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
+
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+const dbName = "test";
+const collName = "initial_sync_commit_prepared_transaction";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert({_id: 1}));
+
+jsTestLog("Preparing a transaction that will be the oldest active transaction");
+
+// Prepare a transaction so that there is an active transaction with an oplog entry. The prepare
+// timestamp will become the beginFetchingTimestamp during initial sync.
+const session1 = primary.startSession({causalConsistency: false});
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 2}));
+let prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+
+// Do another operation so that the beginFetchingTimestamp will be different from the
+// beginApplyingTimestamp.
+assert.commandWorked(testColl.insert({_id: 3}));
+
+jsTestLog("Restarting the secondary");
+
+// Restart the secondary with startClean set to true so that it goes through initial sync. Also
+// restart the node with a failpoint turned on that will pause initial sync after the secondary
+// has copied {_id: 1} and {_id: 3}. This way we can try to commit the prepared transaction
+// while initial sync is paused and know that its operations won't be copied during collection
+// cloning. Instead, the commitTransaction oplog entry must be applied during oplog application.
+replTest.stop(secondary,
+ // signal
+ undefined,
+ // Validation would encounter a prepare conflict on the open transaction.
+ {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangDuringCollectionClone': tojson(
+ {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 2}}),
+ 'numInitialSyncAttempts': 1
+ }
+ },
+ true /* wait */);
+
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
+
+jsTestLog("Running operations while collection cloning is paused");
+
+// Commit a transaction on the sync source while collection cloning is paused so that we know
+// they must be applied during the oplog application stage of initial sync.
+assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
+
+jsTestLog("Resuming initial sync");
+
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+
+// Wait for the secondary to complete initial sync.
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
+
+jsTestLog("Initial sync completed");
+
+// Make sure the transaction committed properly and is reflected after the initial sync.
+let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
+assert.docEq(res, {_id: 2}, res);
+
+// Step up the secondary after initial sync is done and make sure we can successfully run
+// another transaction.
+replTest.stepUp(secondary);
+replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
+let newPrimary = replTest.getPrimary();
+const session2 = newPrimary.startSession({causalConsistency: false});
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 4}));
+let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
+res = newPrimary.getDB(dbName).getCollection(collName).findOne({_id: 4});
+assert.docEq(res, {_id: 4}, res);
+
+replTest.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_document_validation.js b/jstests/replsets/initial_sync_document_validation.js
index 06d9388b51d..79d06f75140 100644
--- a/jstests/replsets/initial_sync_document_validation.js
+++ b/jstests/replsets/initial_sync_document_validation.js
@@ -3,27 +3,27 @@
*/
(function() {
- var name = 'initial_sync_document_validation';
- var replSet = new ReplSetTest({
- name: name,
- nodes: 2,
- });
+var name = 'initial_sync_document_validation';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: 2,
+});
- replSet.startSet();
- replSet.initiate();
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
+replSet.startSet();
+replSet.initiate();
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
- var coll = primary.getDB('test').getCollection(name);
- assert.writeOK(coll.insert({_id: 0, x: 1}));
- assert.commandWorked(coll.runCommand("collMod", {"validator": {a: {$exists: true}}}));
+var coll = primary.getDB('test').getCollection(name);
+assert.writeOK(coll.insert({_id: 0, x: 1}));
+assert.commandWorked(coll.runCommand("collMod", {"validator": {a: {$exists: true}}}));
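+
+// Note that {_id: 0, x: 1} does not satisfy the validator added above; the asserts below confirm
+// that initial sync still clones it, since collection cloning does not enforce validators.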
- secondary = replSet.restart(secondary, {startClean: true});
- replSet.awaitReplication();
- replSet.awaitSecondaryNodes();
+secondary = replSet.restart(secondary, {startClean: true});
+replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
- assert.eq(1, secondary.getDB("test")[name].count());
- assert.docEq({_id: 0, x: 1}, secondary.getDB("test")[name].findOne());
+assert.eq(1, secondary.getDB("test")[name].count());
+assert.docEq({_id: 0, x: 1}, secondary.getDB("test")[name].findOne());
- replSet.stopSet();
+replSet.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_drop_collection.js b/jstests/replsets/initial_sync_drop_collection.js
index babb30dac56..6488f55e01e 100644
--- a/jstests/replsets/initial_sync_drop_collection.js
+++ b/jstests/replsets/initial_sync_drop_collection.js
@@ -1,173 +1,167 @@
// Test that CollectionCloner completes without error when a collection is dropped during cloning.
(function() {
- "use strict";
-
- // Skip db hash check because secondary cannot complete initial sync.
- TestData.skipCheckDBHashes = true;
-
- load("jstests/libs/check_log.js");
- load('jstests/replsets/libs/two_phase_drops.js');
- load("jstests/libs/uuid_util.js");
-
- // Set up replica set. Disallow chaining so nodes always sync from primary.
- const testName = "initial_sync_drop_collection";
- const dbName = testName;
- var replTest = new ReplSetTest({
- name: testName,
- nodes: [{}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- replTest.startSet();
- replTest.initiate();
-
- var primary = replTest.getPrimary();
- var primaryDB = primary.getDB(dbName);
- var secondary = replTest.getSecondary();
- var secondaryDB = secondary.getDB(dbName);
- const collName = "testcoll";
- var primaryColl = primaryDB[collName];
- var secondaryColl = secondaryDB[collName];
- var pRenameColl = primaryDB["r_" + collName];
- var nss = primaryColl.getFullName();
-
- // This function adds data to the collection, restarts the secondary node with the given
- // parameters and setting the given failpoint, waits for the failpoint to be hit,
- // drops the collection, then disables the failpoint. It then optionally waits for the
- // expectedLog message and waits for the secondary to complete initial sync, then ensures
- // the collection on the secondary is empty.
- function setupTest({failPoint, secondaryStartupParams}) {
- jsTestLog("Writing data to collection.");
- assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}]));
-
- jsTestLog("Restarting secondary with failPoint " + failPoint + " set for " + nss);
- secondaryStartupParams = secondaryStartupParams || {};
- secondaryStartupParams['failpoint.' + failPoint] =
- tojson({mode: 'alwaysOn', data: {nss: nss}});
- // Skip clearing initial sync progress after a successful initial sync attempt so that we
- // can check initialSyncStatus fields after initial sync is complete.
- secondaryStartupParams['failpoint.skipClearInitialSyncState'] = tojson({mode: 'alwaysOn'});
- secondaryStartupParams['numInitialSyncAttempts'] = 1;
- replTest.restart(secondary, {startClean: true, setParameter: secondaryStartupParams});
-
- jsTestLog("Waiting for secondary to reach failPoint " + failPoint);
- checkLog.contains(secondary, failPoint + " fail point enabled for " + nss);
-
- // Restarting the secondary may have resulted in an election. Wait until the system
- // stabilizes and reaches RS_STARTUP2 state.
- replTest.getPrimary();
- replTest.waitForState(secondary, ReplSetTest.State.STARTUP_2);
+"use strict";
+
+// Skip db hash check because secondary cannot complete initial sync.
+TestData.skipCheckDBHashes = true;
+
+load("jstests/libs/check_log.js");
+load('jstests/replsets/libs/two_phase_drops.js');
+load("jstests/libs/uuid_util.js");
+
+// Set up replica set. Disallow chaining so nodes always sync from primary.
+const testName = "initial_sync_drop_collection";
+const dbName = testName;
+var replTest = new ReplSetTest(
+ {name: testName, nodes: [{}, {rsConfig: {priority: 0}}], settings: {chainingAllowed: false}});
+replTest.startSet();
+replTest.initiate();
+
+var primary = replTest.getPrimary();
+var primaryDB = primary.getDB(dbName);
+var secondary = replTest.getSecondary();
+var secondaryDB = secondary.getDB(dbName);
+const collName = "testcoll";
+var primaryColl = primaryDB[collName];
+var secondaryColl = secondaryDB[collName];
+var pRenameColl = primaryDB["r_" + collName];
+var nss = primaryColl.getFullName();
+
+// Together with finishTest() below, this function adds data to the collection, restarts the
+// secondary node with the given parameters and the given failpoint set, waits for the failpoint
+// to be hit, drops the collection, then disables the failpoint. It then optionally waits for the
+// expectedLog message, waits for the secondary to complete initial sync, and ensures that
+// the collection on the secondary is empty.
+function setupTest({failPoint, secondaryStartupParams}) {
+ jsTestLog("Writing data to collection.");
+ assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}]));
+
+ jsTestLog("Restarting secondary with failPoint " + failPoint + " set for " + nss);
+ secondaryStartupParams = secondaryStartupParams || {};
+ secondaryStartupParams['failpoint.' + failPoint] = tojson({mode: 'alwaysOn', data: {nss: nss}});
+ // Skip clearing initial sync progress after a successful initial sync attempt so that we
+ // can check initialSyncStatus fields after initial sync is complete.
+ secondaryStartupParams['failpoint.skipClearInitialSyncState'] = tojson({mode: 'alwaysOn'});
+ secondaryStartupParams['numInitialSyncAttempts'] = 1;
+ replTest.restart(secondary, {startClean: true, setParameter: secondaryStartupParams});
+
+ jsTestLog("Waiting for secondary to reach failPoint " + failPoint);
+ checkLog.contains(secondary, failPoint + " fail point enabled for " + nss);
+
+ // Restarting the secondary may have resulted in an election. Wait until the system
+ // stabilizes and reaches RS_STARTUP2 state.
+ replTest.getPrimary();
+ replTest.waitForState(secondary, ReplSetTest.State.STARTUP_2);
+}
+
+function finishTest({failPoint, secondaryStartupParams, expectedLog, waitForDrop, createNew}) {
+ // Get the uuid for use in checking the log line.
+ let uuid = getUUIDFromListCollections(primaryDB, collName);
+
+ jsTestLog("Dropping collection on primary: " + primaryColl.getFullName());
+ assert(primaryColl.drop());
+
+ if (waitForDrop) {
+ jsTestLog("Waiting for drop to commit on primary");
+ TwoPhaseDropCollectionTest.waitForDropToComplete(primaryDB, collName);
}
- function finishTest({failPoint, secondaryStartupParams, expectedLog, waitForDrop, createNew}) {
- // Get the uuid for use in checking the log line.
- let uuid = getUUIDFromListCollections(primaryDB, collName);
+ if (createNew) {
+ jsTestLog("Creating a new collection with the same name: " + primaryColl.getFullName());
+ assert.writeOK(primaryColl.insert({_id: "not the same collection"}));
+ }
- jsTestLog("Dropping collection on primary: " + primaryColl.getFullName());
- assert(primaryColl.drop());
+ jsTestLog("Allowing secondary to continue.");
+ assert.commandWorked(secondary.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
- if (waitForDrop) {
- jsTestLog("Waiting for drop to commit on primary");
- TwoPhaseDropCollectionTest.waitForDropToComplete(primaryDB, collName);
- }
-
- if (createNew) {
- jsTestLog("Creating a new collection with the same name: " + primaryColl.getFullName());
- assert.writeOK(primaryColl.insert({_id: "not the same collection"}));
- }
-
- jsTestLog("Allowing secondary to continue.");
- assert.commandWorked(secondary.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
-
- if (expectedLog) {
- jsTestLog(eval(expectedLog));
- checkLog.contains(secondary, eval(expectedLog));
- }
-
- jsTestLog("Waiting for initial sync to complete.");
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
-
- let res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert.eq(0, res.initialSyncStatus.failedInitialSyncAttempts);
-
- if (createNew) {
- assert.eq([{_id: "not the same collection"}], secondaryColl.find().toArray());
- assert(primaryColl.drop());
- } else {
- assert.eq(0, secondaryColl.find().itcount());
- }
- replTest.checkReplicatedDataHashes();
+ if (expectedLog) {
+ jsTestLog(eval(expectedLog));
+ checkLog.contains(secondary, eval(expectedLog));
}
- function runDropTest(params) {
- setupTest(params);
- finishTest(params);
- }
+ jsTestLog("Waiting for initial sync to complete.");
+ replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
- jsTestLog("Testing dropping between listIndexes and find.");
- runDropTest({failPoint: "initialSyncHangCollectionClonerBeforeEstablishingCursor"});
-
- jsTestLog(
- "Testing dropping between listIndexes and find, with new same-name collection created.");
- runDropTest(
- {failPoint: "initialSyncHangCollectionClonerBeforeEstablishingCursor", createNew: true});
-
- jsTestLog("Testing drop-pending between getMore calls.");
- runDropTest({
- failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- secondaryStartupParams: {collectionClonerBatchSize: 1},
- expectedLog:
- "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`"
- });
-
- jsTestLog("Testing drop-pending with new same-name collection created, between getMore calls.");
- runDropTest({
- failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- secondaryStartupParams: {collectionClonerBatchSize: 1},
- expectedLog:
- "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`",
- createNew: true
- });
-
- jsTestLog("Testing committed drop between getMore calls.");
-
- // Add another node to the set, so when we drop the collection it can commit. This other
- // secondary will be finished with initial sync when the drop happens.
- var secondary2 = replTest.add({rsConfig: {priority: 0}});
- replTest.reInitiate();
- replTest.waitForState(secondary2, ReplSetTest.State.SECONDARY);
-
- runDropTest({
- failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- secondaryStartupParams: {collectionClonerBatchSize: 1},
- waitForDrop: true,
- expectedLog:
- "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`"
- });
-
- jsTestLog("Testing rename between getMores.");
- setupTest({
- failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- secondaryStartupParams: {collectionClonerBatchSize: 1},
- });
- jsTestLog("Renaming collection on primary");
- assert.commandWorked(primary.adminCommand({
- renameCollection: primaryColl.getFullName(),
- to: pRenameColl.getFullName(),
- dropTarget: false
- }));
+ let res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+ assert.eq(0, res.initialSyncStatus.failedInitialSyncAttempts);
- jsTestLog("Allowing secondary to continue.");
- // Make sure we don't reach the fassert() indicating initial sync failure.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangBeforeFinish", mode: 'alwaysOn'}));
-
- assert.commandWorked(secondary.adminCommand({
- configureFailPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
- mode: 'off'
- }));
- jsTestLog("Waiting for initial sync to complete.");
- checkLog.contains(secondary,
- "The maximum number of retries have been exhausted for initial sync.");
- replTest.stopSet();
+ if (createNew) {
+ assert.eq([{_id: "not the same collection"}], secondaryColl.find().toArray());
+ assert(primaryColl.drop());
+ } else {
+ assert.eq(0, secondaryColl.find().itcount());
+ }
+ replTest.checkReplicatedDataHashes();
+}
+
+function runDropTest(params) {
+ setupTest(params);
+ finishTest(params);
+}
+
+jsTestLog("Testing dropping between listIndexes and find.");
+runDropTest({failPoint: "initialSyncHangCollectionClonerBeforeEstablishingCursor"});
+
+jsTestLog("Testing dropping between listIndexes and find, with new same-name collection created.");
+runDropTest(
+ {failPoint: "initialSyncHangCollectionClonerBeforeEstablishingCursor", createNew: true});
+
+jsTestLog("Testing drop-pending between getMore calls.");
+runDropTest({
+ failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
+ secondaryStartupParams: {collectionClonerBatchSize: 1},
+ expectedLog:
+ "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`"
+});
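+
+// The expectedLog values above are template literals kept as plain strings; finishTest() passes
+// them through eval() so that ${nss} and ${uuid} expand to the values in its scope.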
+
+jsTestLog("Testing drop-pending with new same-name collection created, between getMore calls.");
+runDropTest({
+ failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
+ secondaryStartupParams: {collectionClonerBatchSize: 1},
+ expectedLog:
+ "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`",
+ createNew: true
+});
+
+jsTestLog("Testing committed drop between getMore calls.");
+
+// Add another node to the set, so when we drop the collection it can commit. This other
+// secondary will be finished with initial sync when the drop happens.
+var secondary2 = replTest.add({rsConfig: {priority: 0}});
+replTest.reInitiate();
+replTest.waitForState(secondary2, ReplSetTest.State.SECONDARY);
+
+runDropTest({
+ failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
+ secondaryStartupParams: {collectionClonerBatchSize: 1},
+ waitForDrop: true,
+ expectedLog:
+ "`CollectionCloner ns: '${nss}' uuid: ${uuid} stopped because collection was dropped.`"
+});
+
+jsTestLog("Testing rename between getMores.");
+setupTest({
+ failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
+ secondaryStartupParams: {collectionClonerBatchSize: 1},
+});
+jsTestLog("Renaming collection on primary");
+assert.commandWorked(primary.adminCommand({
+ renameCollection: primaryColl.getFullName(),
+ to: pRenameColl.getFullName(),
+ dropTarget: false
+}));
+
+jsTestLog("Allowing secondary to continue.");
+// Make sure we don't reach the fassert() indicating initial sync failure.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "initialSyncHangBeforeFinish", mode: 'alwaysOn'}));
+
+assert.commandWorked(secondary.adminCommand({
+ configureFailPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
+ mode: 'off'
+}));
+jsTestLog("Waiting for initial sync to complete.");
+checkLog.contains(secondary, "The maximum number of retries have been exhausted for initial sync.");
+replTest.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_during_stepdown.js b/jstests/replsets/initial_sync_during_stepdown.js
index d142b911b3c..9d68ac69c49 100644
--- a/jstests/replsets/initial_sync_during_stepdown.js
+++ b/jstests/replsets/initial_sync_during_stepdown.js
@@ -2,176 +2,173 @@
* Test that stepdown during collection cloning and oplog fetching does not interrupt initial sync.
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
-
- const testName = "initialSyncDuringStepDown";
- const dbName = testName;
- const collName = "testcoll";
-
- // Start a 3 node replica set to avoid primary step down after secondary restart.
- const rst = new ReplSetTest({
- nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}],
- settings: {chainingAllowed: false}
- });
- rst.startSet();
- rst.initiate();
-
- var primary = rst.getPrimary();
- var primaryDB = primary.getDB(dbName);
- var primaryAdmin = primary.getDB("admin");
- var primaryColl = primaryDB[collName];
- var secondary = rst.getSecondary();
- var secondaryDB = secondary.getDB(dbName);
- var secondaryColl = secondaryDB[collName];
- var dbNss = primaryDB.getName();
- var collNss = primaryColl.getFullName();
-
- function setupTest({
- failPoint,
- nss: nss = '',
- nssSuffix: nssSuffix = '',
- secondaryStartupParams: secondaryStartupParams = {}
- }) {
- jsTestLog("Writing data to collection.");
- assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}]));
-
- jsTestLog("Stopping secondary.");
- rst.stop(secondary);
-
- jsTestLog("Enabling failpoint '" + failPoint + "' on primary (sync source).");
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: failPoint,
- data: {nss: nss + nssSuffix, shouldCheckForInterrupt: true, shouldNotdropLock: true},
- mode: "alwaysOn"
- }));
-
- jsTestLog("Starting secondary.");
- secondaryStartupParams['numInitialSyncAttempts'] = 1;
- // Skip clearing initial sync progress after a successful initial sync attempt so that we
- // can check initialSyncStatus fields after initial sync is complete.
- secondaryStartupParams['failpoint.skipClearInitialSyncState'] = tojson({mode: 'alwaysOn'});
- rst.start(secondary, {startClean: true, setParameter: secondaryStartupParams});
-
- // Wait until secondary reaches RS_STARTUP2 state.
- rst.waitForState(secondary, ReplSetTest.State.STARTUP_2);
- }
-
- function finishTest(
- {failPoint, nss: nss = '', DocsCopiedByOplogFetcher: DocsCopiedByOplogFetcher = 0}) {
- jsTestLog("Waiting for primary to reach failPoint '" + failPoint + "'.");
- waitForCurOpByFailPoint(primaryAdmin, new RegExp('^' + nss), failPoint);
-
- jsTestLog("Making primary step down");
- const joinStepDownThread = startParallelShell(() => {
- assert.commandWorked(db.adminCommand({"replSetStepDown": 30 * 60, "force": true}));
- }, primary.port);
-
- // Wait until the step down has started to kill user operations.
- checkLog.contains(primary, "Starting to kill user operations");
-
- jsTestLog("Allowing initial sync to continue.");
- assert.commandWorked(
- primaryAdmin.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
-
- jsTestLog("Waiting for initial sync to complete.");
- rst.waitForState(secondary, ReplSetTest.State.SECONDARY);
-
- // Wait until the primary transitioned to SECONDARY state.
- joinStepDownThread();
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- jsTestLog("Validating initial sync data.");
- let res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert.eq(0, res.initialSyncStatus.failedInitialSyncAttempts);
- assert.eq(2 + DocsCopiedByOplogFetcher, secondaryColl.find().itcount());
-
- // As checkReplicatedDataHashes requires primary to validate the cloned data, we need to
- // unfreeze the old primary and make it re-elected.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
- rst.checkReplicatedDataHashes();
-
- jsTestLog("Dropping collection '" + collName + "'.");
- assert(primaryColl.drop());
- }
-
- function runStepDownTest(params) {
- setupTest(params);
- finishTest(params);
- }
-
- jsTestLog("Testing stepdown while 'databases' cloner lists databases.");
- runStepDownTest({failPoint: "hangBeforeListDatabases"});
-
- jsTestLog("Testing stepdown while 'database' cloner lists collections.");
- runStepDownTest(
- {failPoint: "hangBeforeListCollections", nss: dbNss, nssSuffix: ".$cmd.listCollections"});
-
- jsTestLog("Testing stepdown while 'collection' cloner performs collection count.");
- runStepDownTest({failPoint: "hangBeforeCollectionCount", nss: collNss});
-
- jsTestLog("Testing stepdown while 'collection' cloner list indexes for a collection.");
- runStepDownTest({failPoint: "hangBeforeListIndexes", nss: collNss});
-
- jsTestLog("Testing stepdown while 'collection' cloner clones collection data.");
- runStepDownTest({failPoint: "waitInFindBeforeMakingBatch", nss: collNss});
-
- jsTestLog("Testing stepdown between collection data batches.");
- runStepDownTest({
- failPoint: "waitWithPinnedCursorDuringGetMoreBatch",
- nss: collNss,
- secondaryStartupParams: {collectionClonerBatchSize: 1}
- });
-
- // Restart secondary with "oplogFetcherInitialSyncMaxFetcherRestarts"
- // set to zero to avoid masking the oplog fetcher error and enable fail point
- // "waitAfterPinningCursorBeforeGetMoreBatch" which drops and reacquires read lock
- // to prevent deadlock between getmore and insert thread for ephemeral storage
- // engine.
- jsTestLog("Testing stepdown during oplog fetching");
- const oplogNss = "local.oplog.rs";
- setupTest({
- failPoint: "waitAfterPinningCursorBeforeGetMoreBatch",
- nss: oplogNss,
- secondaryStartupParams: {
- initialSyncOplogFetcherBatchSize: 1,
- oplogFetcherInitialSyncMaxFetcherRestarts: 0,
- "failpoint.initialSyncHangAfterDataCloning": tojson({mode: 'alwaysOn'})
- }
- });
-
- jsTestLog("Waiting for collection cloning to complete.");
- checkLog.contains(secondary, "initialSyncHangAfterDataCloning fail point enabled");
-
- // Insert more data so that these are replicated to secondary node via oplog fetcher.
- jsTestLog("Inserting more data on primary.");
- assert.writeOK(primaryColl.insert([{_id: 3}, {_id: 4}]));
-
- // Insert is successful. So, enable fail point "waitWithPinnedCursorDuringGetMoreBatch"
- // such that it doesn't drop locks when getmore cmd waits inside the fail point block.
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
+
+const testName = "initialSyncDuringStepDown";
+const dbName = testName;
+const collName = "testcoll";
+
+// Start a 3-node replica set to avoid a primary stepdown after the secondary restarts.
+const rst = new ReplSetTest(
+ {nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}], settings: {chainingAllowed: false}});
+rst.startSet();
+rst.initiate();
+
+var primary = rst.getPrimary();
+var primaryDB = primary.getDB(dbName);
+var primaryAdmin = primary.getDB("admin");
+var primaryColl = primaryDB[collName];
+var secondary = rst.getSecondary();
+var secondaryDB = secondary.getDB(dbName);
+var secondaryColl = secondaryDB[collName];
+var dbNss = primaryDB.getName();
+var collNss = primaryColl.getFullName();
+
+function setupTest({
+ failPoint,
+ nss: nss = '',
+ nssSuffix: nssSuffix = '',
+ secondaryStartupParams: secondaryStartupParams = {}
+}) {
+ jsTestLog("Writing data to collection.");
+ assert.writeOK(primaryColl.insert([{_id: 1}, {_id: 2}]));
+
+ jsTestLog("Stopping secondary.");
+ rst.stop(secondary);
+
+ jsTestLog("Enabling failpoint '" + failPoint + "' on primary (sync source).");
assert.commandWorked(primary.adminCommand({
- configureFailPoint: "waitWithPinnedCursorDuringGetMoreBatch",
- data: {nss: oplogNss, shouldCheckForInterrupt: true, shouldNotdropLock: true},
+ configureFailPoint: failPoint,
+ data: {nss: nss + nssSuffix, shouldCheckForInterrupt: true, shouldNotdropLock: true},
mode: "alwaysOn"
}));
- // Now, disable fail point "waitAfterPinningCursorBeforeGetMoreBatch" to allow getmore to
- // continue and hang on "waitWithPinnedCursorDuringGetMoreBatch" fail point.
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}));
-
- // Disable fail point on secondary to allow initial sync to continue.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangAfterDataCloning", mode: "off"}));
-
- finishTest({
- failPoint: "waitWithPinnedCursorDuringGetMoreBatch",
- nss: "local.oplog.rs",
- DocsCopiedByOplogFetcher: 2
- });
-
- rst.stopSet();
+ jsTestLog("Starting secondary.");
+ secondaryStartupParams['numInitialSyncAttempts'] = 1;
+ // Skip clearing initial sync progress after a successful initial sync attempt so that we
+ // can check initialSyncStatus fields after initial sync is complete.
+ secondaryStartupParams['failpoint.skipClearInitialSyncState'] = tojson({mode: 'alwaysOn'});
+ rst.start(secondary, {startClean: true, setParameter: secondaryStartupParams});
+
+ // Wait until secondary reaches RS_STARTUP2 state.
+ rst.waitForState(secondary, ReplSetTest.State.STARTUP_2);
+}
+
+function finishTest(
+ {failPoint, nss: nss = '', DocsCopiedByOplogFetcher: DocsCopiedByOplogFetcher = 0}) {
+ jsTestLog("Waiting for primary to reach failPoint '" + failPoint + "'.");
+ waitForCurOpByFailPoint(primaryAdmin, new RegExp('^' + nss), failPoint);
+
+ jsTestLog("Making primary step down");
+ const joinStepDownThread = startParallelShell(() => {
+ assert.commandWorked(db.adminCommand({"replSetStepDown": 30 * 60, "force": true}));
+ }, primary.port);
+
+ // Wait until the step down has started to kill user operations.
+ checkLog.contains(primary, "Starting to kill user operations");
+
+ jsTestLog("Allowing initial sync to continue.");
+ assert.commandWorked(primaryAdmin.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
+
+ jsTestLog("Waiting for initial sync to complete.");
+ rst.waitForState(secondary, ReplSetTest.State.SECONDARY);
+
+    // Wait until the primary has transitioned to SECONDARY state.
+ joinStepDownThread();
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+ jsTestLog("Validating initial sync data.");
+ let res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+ assert.eq(0, res.initialSyncStatus.failedInitialSyncAttempts);
+ assert.eq(2 + DocsCopiedByOplogFetcher, secondaryColl.find().itcount());
+
+    // As checkReplicatedDataHashes requires a primary to validate the cloned data, we need to
+    // unfreeze the old primary and have it re-elected.
+ assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+ rst.getPrimary();
+ rst.checkReplicatedDataHashes();
+
+ jsTestLog("Dropping collection '" + collName + "'.");
+ assert(primaryColl.drop());
+}
+
+function runStepDownTest(params) {
+ setupTest(params);
+ finishTest(params);
+}
+
+jsTestLog("Testing stepdown while 'databases' cloner lists databases.");
+runStepDownTest({failPoint: "hangBeforeListDatabases"});
+
+jsTestLog("Testing stepdown while 'database' cloner lists collections.");
+runStepDownTest(
+ {failPoint: "hangBeforeListCollections", nss: dbNss, nssSuffix: ".$cmd.listCollections"});
+
+jsTestLog("Testing stepdown while 'collection' cloner performs collection count.");
+runStepDownTest({failPoint: "hangBeforeCollectionCount", nss: collNss});
+
+jsTestLog("Testing stepdown while 'collection' cloner list indexes for a collection.");
+runStepDownTest({failPoint: "hangBeforeListIndexes", nss: collNss});
+
+jsTestLog("Testing stepdown while 'collection' cloner clones collection data.");
+runStepDownTest({failPoint: "waitInFindBeforeMakingBatch", nss: collNss});
+
+jsTestLog("Testing stepdown between collection data batches.");
+runStepDownTest({
+ failPoint: "waitWithPinnedCursorDuringGetMoreBatch",
+ nss: collNss,
+ secondaryStartupParams: {collectionClonerBatchSize: 1}
+});
+
+// Restart the secondary with "oplogFetcherInitialSyncMaxFetcherRestarts" set to zero to avoid
+// masking the oplog fetcher error, and enable the fail point
+// "waitAfterPinningCursorBeforeGetMoreBatch", which drops and reacquires the read lock to
+// prevent a deadlock between the getMore and insert threads on ephemeral storage engines.
+jsTestLog("Testing stepdown during oplog fetching");
+const oplogNss = "local.oplog.rs";
+setupTest({
+ failPoint: "waitAfterPinningCursorBeforeGetMoreBatch",
+ nss: oplogNss,
+ secondaryStartupParams: {
+ initialSyncOplogFetcherBatchSize: 1,
+ oplogFetcherInitialSyncMaxFetcherRestarts: 0,
+ "failpoint.initialSyncHangAfterDataCloning": tojson({mode: 'alwaysOn'})
+ }
+});
+
+jsTestLog("Waiting for collection cloning to complete.");
+checkLog.contains(secondary, "initialSyncHangAfterDataCloning fail point enabled");
+
+// Insert more data so that it is replicated to the secondary node via the oplog fetcher.
+jsTestLog("Inserting more data on primary.");
+assert.writeOK(primaryColl.insert([{_id: 3}, {_id: 4}]));
+
+// The insert succeeded, so enable the fail point "waitWithPinnedCursorDuringGetMoreBatch"
+// such that it doesn't drop locks while the getMore command waits inside the fail point block.
+assert.commandWorked(primary.adminCommand({
+ configureFailPoint: "waitWithPinnedCursorDuringGetMoreBatch",
+ data: {nss: oplogNss, shouldCheckForInterrupt: true, shouldNotdropLock: true},
+ mode: "alwaysOn"
+}));
+
+// Now, disable fail point "waitAfterPinningCursorBeforeGetMoreBatch" to allow getmore to
+// continue and hang on "waitWithPinnedCursorDuringGetMoreBatch" fail point.
+assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}));
+
+// Disable fail point on secondary to allow initial sync to continue.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "initialSyncHangAfterDataCloning", mode: "off"}));
+
+finishTest({
+ failPoint: "waitWithPinnedCursorDuringGetMoreBatch",
+ nss: "local.oplog.rs",
+ DocsCopiedByOplogFetcher: 2
+});
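+
+// finishTest() expects 2 + DocsCopiedByOplogFetcher documents on the secondary: the two documents
+// cloned during collection copy ({_id: 1}, {_id: 2}) plus the two fetched via the oplog
+// ({_id: 3}, {_id: 4}).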
+
+rst.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_fail_insert_once.js b/jstests/replsets/initial_sync_fail_insert_once.js
index d85f97f5a63..0a1f0a11a8a 100644
--- a/jstests/replsets/initial_sync_fail_insert_once.js
+++ b/jstests/replsets/initial_sync_fail_insert_once.js
@@ -5,33 +5,33 @@
*/
(function() {
- var name = 'initial_sync_fail_insert_once';
- var replSet = new ReplSetTest(
- {name: name, nodes: 2, nodeOptions: {setParameter: "numInitialSyncAttempts=3"}});
+var name = 'initial_sync_fail_insert_once';
+var replSet = new ReplSetTest(
+ {name: name, nodes: 2, nodeOptions: {setParameter: "numInitialSyncAttempts=3"}});
- replSet.startSet();
- replSet.initiate();
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
+replSet.startSet();
+replSet.initiate();
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
- var coll = primary.getDB('test').getCollection(name);
- assert.writeOK(coll.insert({_id: 0, x: 1}, {writeConcern: {w: 2}}));
+var coll = primary.getDB('test').getCollection(name);
+assert.writeOK(coll.insert({_id: 0, x: 1}, {writeConcern: {w: 2}}));
- jsTest.log("Enabling Failpoint failCollectionInserts on " + tojson(secondary));
- assert.commandWorked(secondary.getDB("admin").adminCommand({
- configureFailPoint: "failCollectionInserts",
- mode: {times: 2},
- data: {collectionNS: coll.getFullName()}
- }));
+jsTest.log("Enabling Failpoint failCollectionInserts on " + tojson(secondary));
+assert.commandWorked(secondary.getDB("admin").adminCommand({
+ configureFailPoint: "failCollectionInserts",
+ mode: {times: 2},
+ data: {collectionNS: coll.getFullName()}
+}));
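+
+// With mode {times: 2}, the fail point aborts the first two initial sync attempts, so the third
+// of the three allowed attempts (numInitialSyncAttempts=3 above) is expected to succeed.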
- jsTest.log("Re-syncing " + tojson(secondary));
- secondary = replSet.restart(secondary, {startClean: true});
- replSet.awaitReplication();
- replSet.awaitSecondaryNodes();
+jsTest.log("Re-syncing " + tojson(secondary));
+secondary = replSet.restart(secondary, {startClean: true});
+replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
- assert.eq(1, secondary.getDB("test")[name].count());
- assert.docEq({_id: 0, x: 1}, secondary.getDB("test")[name].findOne());
+assert.eq(1, secondary.getDB("test")[name].count());
+assert.docEq({_id: 0, x: 1}, secondary.getDB("test")[name].findOne());
- jsTest.log("Stopping repl set test; finished.");
- replSet.stopSet();
+jsTest.log("Stopping repl set test; finished.");
+replSet.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_fcv.js b/jstests/replsets/initial_sync_fcv.js
index a0fd4420548..af0a466848c 100644
--- a/jstests/replsets/initial_sync_fcv.js
+++ b/jstests/replsets/initial_sync_fcv.js
@@ -5,86 +5,85 @@
*/
(function() {
- 'use strict';
-
- load("jstests/libs/feature_compatibility_version.js");
- load('jstests/libs/check_log.js');
-
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
-
- // We disallow the secondary node from voting so that the primary's featureCompatibilityVersion
- // can be modified while the secondary node is still waiting to complete its initial sync.
- const replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- replSetConfig.members[1].votes = 0;
- rst.initiate(replSetConfig);
-
- const primary = rst.getPrimary();
- const dbName = 'foo';
- const collName = 'bar';
-
- assert.writeOK(primary.getDB(dbName).getCollection(collName).insert({a: 1}));
-
- function runInitialSync(cmd, initialFCV) {
- assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: initialFCV}));
-
- jsTestLog('Testing setting fCV with ' + tojson(cmd));
-
- const failPointOptions = tojson({mode: 'alwaysOn', data: {database: dbName}});
- rst.restart(1, {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangBeforeListCollections': failPointOptions,
- 'failpoint.skipClearInitialSyncState': tojson({mode: 'alwaysOn'}),
- numInitialSyncAttempts: 2
- }
- });
- const secondary = rst.nodes[1];
-
- // Initial sync clones the 'admin' database first, which will set the fCV on the
- // secondary to initialFCV. We then block the secondary before issuing 'listCollections' on
- // the test database.
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeListCollections fail point enabled');
-
- // Initial sync is stopped right before 'listCollections' on the test database. We now run
- // the test command to modify the fCV.
- assert.commandWorked(primary.adminCommand(cmd));
-
- // Let initial sync finish, making sure that it fails due to the feature compatibility
- // version change.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeListCollections', mode: 'off'}));
- checkLog.contains(secondary,
- 'Applying operation on feature compatibility version document');
-
- jsTestLog('Wait for both nodes to be up-to-date');
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
-
- let res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert.eq(res.initialSyncStatus.failedInitialSyncAttempts, 1);
-
- // We check oplogs and data hashes before we restart the second node.
- rst.checkOplogs();
- rst.checkReplicatedDataHashes();
- }
-
- // Ensure that attempting to downgrade the featureCompatibilityVersion during initial sync
- // fails.
- runInitialSync({setFeatureCompatibilityVersion: lastStableFCV}, /*initialFCV*/ latestFCV);
-
- // Ensure that attempting to upgrade the featureCompatibilityVersion during initial sync fails.
- runInitialSync({setFeatureCompatibilityVersion: latestFCV}, /*initialFCV*/ lastStableFCV);
-
- // Modifications to the featureCompatibilityVersion document during initial sync should be
- // caught and cause initial sync to fail.
- runInitialSync({
- update: 'system.version',
- updates: [{q: {_id: 'featureCompatibilityVersion'}, u: {'version': lastStableFCV}}]
- },
- /*initialFCV*/ latestFCV);
-
- rst.stopSet();
+'use strict';
+
+load("jstests/libs/feature_compatibility_version.js");
+load('jstests/libs/check_log.js');
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+
+// We disallow the secondary node from voting so that the primary's featureCompatibilityVersion
+// can be modified while the secondary node is still waiting to complete its initial sync.
+const replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+replSetConfig.members[1].votes = 0;
+rst.initiate(replSetConfig);
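+
+// Equivalently (a hedged sketch, not the original code), the non-voting secondary could be
+// declared inline, as other tests in this patch do:
+//     const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});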
+
+const primary = rst.getPrimary();
+const dbName = 'foo';
+const collName = 'bar';
+
+assert.writeOK(primary.getDB(dbName).getCollection(collName).insert({a: 1}));
+
+function runInitialSync(cmd, initialFCV) {
+ assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: initialFCV}));
+
+ jsTestLog('Testing setting fCV with ' + tojson(cmd));
+
+ const failPointOptions = tojson({mode: 'alwaysOn', data: {database: dbName}});
+ rst.restart(1, {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangBeforeListCollections': failPointOptions,
+ 'failpoint.skipClearInitialSyncState': tojson({mode: 'alwaysOn'}),
+ numInitialSyncAttempts: 2
+ }
+ });
+ const secondary = rst.nodes[1];
+
+ // Initial sync clones the 'admin' database first, which will set the fCV on the
+ // secondary to initialFCV. We then block the secondary before issuing 'listCollections' on
+ // the test database.
+ checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeListCollections fail point enabled');
+
+ // Initial sync is stopped right before 'listCollections' on the test database. We now run
+ // the test command to modify the fCV.
+ assert.commandWorked(primary.adminCommand(cmd));
+
+ // Let initial sync finish, making sure that it fails due to the feature compatibility
+ // version change.
+ assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeListCollections', mode: 'off'}));
+ checkLog.contains(secondary, 'Applying operation on feature compatibility version document');
+
+ jsTestLog('Wait for both nodes to be up-to-date');
+ rst.awaitSecondaryNodes();
+ rst.awaitReplication();
+
+ let res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+ assert.eq(res.initialSyncStatus.failedInitialSyncAttempts, 1);
+
+ // We check oplogs and data hashes before we restart the second node.
+ rst.checkOplogs();
+ rst.checkReplicatedDataHashes();
+}
+
+// Ensure that attempting to downgrade the featureCompatibilityVersion during initial sync
+// fails.
+runInitialSync({setFeatureCompatibilityVersion: lastStableFCV}, /*initialFCV*/ latestFCV);
+
+// Ensure that attempting to upgrade the featureCompatibilityVersion during initial sync fails.
+runInitialSync({setFeatureCompatibilityVersion: latestFCV}, /*initialFCV*/ lastStableFCV);
+
+// Modifications to the featureCompatibilityVersion document during initial sync should be
+// caught and cause initial sync to fail.
+runInitialSync({
+ update: 'system.version',
+ updates: [{q: {_id: 'featureCompatibilityVersion'}, u: {'version': lastStableFCV}}]
+},
+ /*initialFCV*/ latestFCV);
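+
+// This last case bypasses the setFeatureCompatibilityVersion command and writes directly to the
+// featureCompatibilityVersion document in admin.system.version, which initial sync must likewise
+// detect and treat as a failure.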
+
+rst.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js
index a15f5290ea6..eea0ebfab20 100644
--- a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js
+++ b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp.js
@@ -14,192 +14,194 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
-
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
-
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- const dbName = "test";
- const collName = "initial_sync_fetch_from_oldest_active_transaction_timestamp";
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testColl.insert({_id: 1}));
-
- jsTestLog("Preparing a transaction that will later be committed");
-
- const session1 = primary.startSession();
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 2}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
-
- jsTestLog("Preparing a transaction that will later be the oldest active transaction");
-
- // Prepare a transaction so that there is an active transaction with an oplog entry. The
- // timestamp of the first oplog entry of this transaction will become the beginFetchingTimestamp
- // during initial sync.
- let session2 = primary.startSession();
- let sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.update({_id: 1}, {_id: 1, a: 1}));
- let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
-
- const lsid2 = session2.getSessionId();
- const txnNumber2 = session2.getTxnNumber_forTesting();
-
- const oplog = primary.getDB("local").getCollection("oplog.rs");
- const txnNum = session2.getTxnNumber_forTesting();
- const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session2.getSessionId().id});
- assert.neq(op, null);
- const beginFetchingTs = op.ts;
- jsTestLog("Expected beginFetchingTimestamp: " + beginFetchingTs);
-
- // Commit the first transaction so that we have an operation that is fetched during initial sync
- // but should not be applied. If this is applied, initial sync will fail because while trying to
- // apply the commitTransaction oplog entry, it will fail to get the prepare oplog
- // entry since its optime is before the beginFetchingTimestamp. Doing another operation will
- // also cause the beginApplyingTimestamp to be different from the beginFetchingTimestamp. Note
- // that since the beginApplyingTimestamp is the timestamp after which operations are applied
- // during initial sync, this commitTransaction will not be applied.
- const beginApplyingTimestamp =
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1))
- .operationTime;
-
- jsTestLog("beginApplyingTimestamp: " + beginApplyingTimestamp);
-
- // Restart the secondary with startClean set to true so that it goes through initial sync. Also
- // restart the node with a failpoint turned on that will pause initial sync after the secondary
- // has copied {_id: 1} and {_id: 2}. This way we can insert more documents when initial sync is
- // paused and know that they won't be copied during collection cloning but instead must be
- // applied during oplog application.
- replTest.stop(secondary,
- // signal
- undefined,
- // Validation would encounter a prepare conflict on the open transaction.
- {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangDuringCollectionClone': tojson(
- {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 2}}),
- 'numInitialSyncAttempts': 1
- }
- },
- true /* wait */);
-
- jsTestLog("Secondary was restarted");
-
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
-
- jsTestLog("Running operations while collection cloning is paused");
-
- // Run some operations on the sync source while collection cloning is paused so that we know
- // they must be applied during the oplog application stage of initial sync. This will also make
- // sure that the beginApplyingTimestamp and the stopTimestamp in initial sync are different. The
- // stopTimestamp is the timestamp of the oplog entry that was last applied on the sync source
- // when the oplog application phase of initial sync begins.
- const stopTimestamp =
- assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 4}]})).operationTime;
-
- jsTestLog("stopTimestamp: " + stopTimestamp);
-
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
-
- jsTestLog("Initial sync resumed");
-
- // Wait for the secondary to complete initial sync.
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
- replTest.awaitReplication();
-
- jsTestLog("Initial sync completed");
-
- // Make sure the secondary fetched enough transaction oplog entries.
- secondary.setSlaveOk();
- const secondaryOplog = secondary.getDB("local").getCollection("oplog.rs");
- assert.eq(secondaryOplog.find({"ts": beginFetchingTs}).itcount(), 1);
-
- // Make sure the first transaction committed properly and is reflected after the initial sync.
- let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
- assert.docEq(res, {_id: 2}, res);
-
- jsTestLog("Stepping up the secondary");
-
- // Step up the secondary after initial sync is done and make sure the transaction is properly
- // prepared.
- replTest.stepUp(secondary);
- replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
- const newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB(dbName);
- testColl = testDB.getCollection(collName);
-
- // Force the second session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
- session2.setTxnNumber_forTesting(txnNumber2);
- sessionDB2 = session2.getDatabase(dbName);
-
- jsTestLog("Checking that the second transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the second prepared transaction
- // after initial sync.
- assert.eq(testColl.find({_id: 1}).toArray(), [{_id: 1}]);
-
- // Make sure that another write on the same document from the second transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the second transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB2.runCommand({
- insert: collName,
- documents: [{_id: 3}],
- txnNumber: NumberLong(txnNumber2),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Committing the second transaction");
-
- // Make sure we can successfully commit the second transaction after recovery.
- assert.commandWorked(sessionDB2.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp2,
- txnNumber: NumberLong(txnNumber2),
- autocommit: false
- }));
- assert.eq(testColl.find({_id: 1}).toArray(), [{_id: 1, a: 1}]);
-
- jsTestLog("Attempting to run another transaction");
-
- // Make sure that we can run another conflicting transaction without any problems.
- session2.startTransaction();
- assert.commandWorked(sessionDB2[collName].update({_id: 1}, {_id: 1, a: 2}));
- prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
- assert.eq(testColl.findOne({_id: 1}), {_id: 1, a: 2});
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
+
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+const dbName = "test";
+const collName = "initial_sync_fetch_from_oldest_active_transaction_timestamp";
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert({_id: 1}));
+
+jsTestLog("Preparing a transaction that will later be committed");
+
+const session1 = primary.startSession();
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 2}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+
+jsTestLog("Preparing a transaction that will later be the oldest active transaction");
+
+// Prepare a transaction so that there is an active transaction with an oplog entry. The
+// timestamp of the first oplog entry of this transaction will become the beginFetchingTimestamp
+// during initial sync.
+let session2 = primary.startSession();
+let sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.update({_id: 1}, {_id: 1, a: 1}));
+let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+
+const lsid2 = session2.getSessionId();
+const txnNumber2 = session2.getTxnNumber_forTesting();
+
+const oplog = primary.getDB("local").getCollection("oplog.rs");
+const txnNum = session2.getTxnNumber_forTesting();
+const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session2.getSessionId().id});
+assert.neq(op, null);
+const beginFetchingTs = op.ts;
+jsTestLog("Expected beginFetchingTimestamp: " + beginFetchingTs);
+
+// Commit the first transaction so that we have an operation that is fetched during initial sync
+// but should not be applied. If this is applied, initial sync will fail because while trying to
+// apply the commitTransaction oplog entry, it will fail to get the prepare oplog
+// entry since its optime is before the beginFetchingTimestamp. Doing another operation will
+// also cause the beginApplyingTimestamp to be different from the beginFetchingTimestamp. Note
+// that since the beginApplyingTimestamp is the timestamp after which operations are applied
+// during initial sync, this commitTransaction will not be applied.
+const beginApplyingTimestamp =
+ assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1))
+ .operationTime;
+
+jsTestLog("beginApplyingTimestamp: " + beginApplyingTimestamp);
+
+// Restart the secondary with startClean set to true so that it goes through initial sync. Also
+// restart the node with a failpoint turned on that will pause initial sync after the secondary
+// has copied {_id: 1} and {_id: 2}. This way we can insert more documents when initial sync is
+// paused and know that they won't be copied during collection cloning but instead must be
+// applied during oplog application.
+replTest.stop(secondary,
+ // signal
+ undefined,
+ // Validation would encounter a prepare conflict on the open transaction.
+ {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangDuringCollectionClone': tojson(
+ {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 2}}),
+ 'numInitialSyncAttempts': 1
+ }
+ },
+ true /* wait */);
+
+jsTestLog("Secondary was restarted");
+
+// Wait for the failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
+
+jsTestLog("Running operations while collection cloning is paused");
+
+// Run some operations on the sync source while collection cloning is paused so that we know
+// they must be applied during the oplog application stage of initial sync. This will also make
+// sure that the beginApplyingTimestamp and the stopTimestamp in initial sync are different. The
+// stopTimestamp is the timestamp of the oplog entry that was last applied on the sync source
+// when the oplog application phase of initial sync begins.
+const stopTimestamp =
+ assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 4}]})).operationTime;
+
+jsTestLog("stopTimestamp: " + stopTimestamp);
+
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+
+jsTestLog("Initial sync resumed");
+
+// Wait for the secondary to complete initial sync.
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
+replTest.awaitReplication();
+
+jsTestLog("Initial sync completed");
+
+// Make sure the secondary fetched enough transaction oplog entries.
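+// setSlaveOk() permits reads against a node in SECONDARY state, which the oplog query below
+// requires.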
+secondary.setSlaveOk();
+const secondaryOplog = secondary.getDB("local").getCollection("oplog.rs");
+assert.eq(secondaryOplog.find({"ts": beginFetchingTs}).itcount(), 1);
+
+// Make sure the first transaction committed properly and is reflected after the initial sync.
+let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
+assert.docEq(res, {_id: 2}, res);
+
+jsTestLog("Stepping up the secondary");
+
+// Step up the secondary after initial sync is done and make sure the transaction is properly
+// prepared.
+replTest.stepUp(secondary);
+replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
+const newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+
+// Force the second session to use the same lsid and txnNumber as before the restart. This
+// ensures that we're working with the same session and transaction.
+session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
+session2.setTxnNumber_forTesting(txnNumber2);
+sessionDB2 = session2.getDatabase(dbName);
+
+jsTestLog("Checking that the second transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the second prepared transaction
+// after initial sync.
+assert.eq(testColl.find({_id: 1}).toArray(), [{_id: 1}]);
+
+// Make sure that another write on the document modified by the second (prepared) transaction
+// causes a write conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
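+
+// The update above hits a prepare conflict with the still-prepared transaction and blocks until
+// maxTimeMS expires, which is why MaxTimeMSExpired is the expected error code.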
+
+// Make sure that we cannot add other operations to the second transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB2.runCommand({
+ insert: collName,
+ documents: [{_id: 3}],
+ txnNumber: NumberLong(txnNumber2),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Committing the second transaction");
+
+// Make sure we can successfully commit the second transaction after recovery.
+assert.commandWorked(sessionDB2.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp2,
+ txnNumber: NumberLong(txnNumber2),
+ autocommit: false
+}));
+assert.eq(testColl.find({_id: 1}).toArray(), [{_id: 1, a: 1}]);
+
+jsTestLog("Attempting to run another transaction");
+
+// Make sure that we can run another conflicting transaction without any problems.
+session2.startTransaction();
+assert.commandWorked(sessionDB2[collName].update({_id: 1}, {_id: 1, a: 2}));
+prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
+assert.eq(testColl.findOne({_id: 1}), {_id: 1, a: 2});
+
+replTest.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js
index 898d1303383..9a4ed0368a5 100644
--- a/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js
+++ b/jstests/replsets/initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application.js
@@ -18,100 +18,98 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
- replTest.startSet();
- replTest.initiate();
-
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- const dbName = "test";
- const collName =
- "initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testColl.insert({_id: 1}));
-
- jsTestLog("Preparing a transaction that will later be committed");
-
- const session1 = primary.startSession({causalConsistency: false});
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 2}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
-
- jsTestLog("Preparing a transaction that will be the oldest active transaction");
-
- // Prepare a transaction so that there is an active transaction with an oplog entry. The
- // timestamp of the first oplog entry of this transaction will become the beginFetchingTimestamp
- // during initial sync.
- const session2 = primary.startSession({causalConsistency: false});
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 3}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
-
- const oplog = primary.getDB("local").getCollection("oplog.rs");
- const txnNum = session2.getTxnNumber_forTesting();
- const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session2.getSessionId().id});
- assert.neq(op, null);
- const beginFetchingTs = op.ts;
- jsTestLog("Expected beginFetchingTimestamp: " + beginFetchingTs);
-
- // Commit the first transaction so that we have an operation that is fetched during initial sync
- // but should not be applied. If this is applied, initial sync will fail because while trying to
- // apply the commitTransaction oplog entry, it will fail to get the prepareTransaction oplog
- // entry since its optime is before the beginFetchingTimestamp. Doing another operation will
- // also cause the beginApplyingTimestamp to be different from the beginFetchingTimestamp. Note
- // that since the beginApplyingTimestamp is the timestamp after which operations are applied
- // during initial sync, this commitTransaction will not be applied.
- const beginApplyingTimestamp =
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1))
- .operationTime;
-
- jsTestLog("beginApplyingTimestamp/stopTimestamp: " + beginApplyingTimestamp);
-
- // Restart the secondary with startClean set to true so that it goes through initial sync. Since
- // we won't be running any operations during collection cloning, the beginApplyingTimestamp and
- // stopTimestamp should be the same.
- replTest.stop(secondary,
- // signal
- undefined,
- // Validation would encounter a prepare conflict on the open transaction.
- {skipValidation: true});
- secondary = replTest.start(secondary,
- {startClean: true, setParameter: {'numInitialSyncAttempts': 1}},
- true /* wait */);
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
-
- jsTestLog("Secondary was restarted");
-
- // Wait for the secondary to complete initial sync.
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
-
- jsTestLog("Initial sync completed");
-
- // Make sure the secondary fetched enough transaction oplog entries.
- secondary.setSlaveOk();
- const secondaryOplog = secondary.getDB("local").getCollection("oplog.rs");
- assert.eq(secondaryOplog.find({"ts": beginFetchingTs}).itcount(), 1);
-
- // Make sure the first transaction committed properly and is reflected after the initial sync.
- let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
- assert.docEq(res, {_id: 2}, res);
-
- jsTestLog("Aborting the second transaction");
-
- assert.commandWorked(session2.abortTransaction_forTesting());
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]});
+replTest.startSet();
+replTest.initiate();
+
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+const dbName = "test";
+const collName = "initial_sync_fetch_from_oldest_active_transaction_timestamp_no_oplog_application";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert({_id: 1}));
+
+jsTestLog("Preparing a transaction that will later be committed");
+
+const session1 = primary.startSession({causalConsistency: false});
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 2}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+
+jsTestLog("Preparing a transaction that will be the oldest active transaction");
+
+// Prepare a transaction so that there is an active transaction with an oplog entry. The
+// timestamp of the first oplog entry of this transaction will become the beginFetchingTimestamp
+// during initial sync.
+const session2 = primary.startSession({causalConsistency: false});
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 3}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+
+const oplog = primary.getDB("local").getCollection("oplog.rs");
+const txnNum = session2.getTxnNumber_forTesting();
+const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session2.getSessionId().id});
+assert.neq(op, null);
+const beginFetchingTs = op.ts;
+jsTestLog("Expected beginFetchingTimestamp: " + beginFetchingTs);
+
+// Commit the first transaction so that we have an operation that is fetched during initial sync
+// but should not be applied. If this is applied, initial sync will fail because while trying to
+// apply the commitTransaction oplog entry, it will fail to get the prepareTransaction oplog
+// entry since its optime is before the beginFetchingTimestamp. Doing another operation will
+// also cause the beginApplyingTimestamp to be different from the beginFetchingTimestamp. Note
+// that since the beginApplyingTimestamp is the timestamp after which operations are applied
+// during initial sync, this commitTransaction will not be applied.
+const beginApplyingTimestamp =
+ assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1))
+ .operationTime;
+
+jsTestLog("beginApplyingTimestamp/stopTimestamp: " + beginApplyingTimestamp);
+
+// Restart the secondary with startClean set to true so that it goes through initial sync. Since
+// we won't be running any operations during collection cloning, the beginApplyingTimestamp and
+// stopTimestamp should be the same.
+replTest.stop(secondary,
+ // signal
+ undefined,
+ // Validation would encounter a prepare conflict on the open transaction.
+ {skipValidation: true});
+secondary = replTest.start(
+ secondary, {startClean: true, setParameter: {'numInitialSyncAttempts': 1}}, true /* wait */);
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+
+jsTestLog("Secondary was restarted");
+
+// Wait for the secondary to complete initial sync.
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
+
+jsTestLog("Initial sync completed");
+
+// Make sure the secondary fetched enough transaction oplog entries.
+secondary.setSlaveOk();
+const secondaryOplog = secondary.getDB("local").getCollection("oplog.rs");
+assert.eq(secondaryOplog.find({"ts": beginFetchingTs}).itcount(), 1);
+
+// Make sure the first transaction committed properly and is reflected after the initial sync.
+let res = secondary.getDB(dbName).getCollection(collName).findOne({_id: 2});
+assert.docEq(res, {_id: 2}, res);
+
+jsTestLog("Aborting the second transaction");
+
+assert.commandWorked(session2.abortTransaction_forTesting());
+
+replTest.stopSet();
})();
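
For context on beginFetchingTimestamp: the tests read it off the sync source's oplog, while the server derives it from the transactions table. A conceptual sketch of that derivation, using the startOpTime field that initial_sync_preserves_active_txns.js (later in this diff) asserts on; illustrative only, not the server's implementation:

    // The oldest active transaction is the config.transactions entry with the
    // smallest startOpTime; its ts is where oplog fetching must begin so that
    // a later commitTransaction entry can still find its prepare entry.
    const oldestActive = primary.getDB("config")
                             .transactions.find({startOpTime: {$exists: true}})
                             .sort({"startOpTime.ts": 1})
                             .limit(1)
                             .next();
    jsTestLog("Oldest active transaction starts at: " + tojson(oldestActive.startOpTime.ts));
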
diff --git a/jstests/replsets/initial_sync_invalid_index_spec.js b/jstests/replsets/initial_sync_invalid_index_spec.js
index 1d329b94b26..24f8b773118 100644
--- a/jstests/replsets/initial_sync_invalid_index_spec.js
+++ b/jstests/replsets/initial_sync_invalid_index_spec.js
@@ -4,53 +4,53 @@
*/
(function() {
- "use strict";
+"use strict";
- // Skip db hash check because of invalid index spec.
- TestData.skipCheckDBHashes = true;
+// Skip db hash check because of invalid index spec.
+TestData.skipCheckDBHashes = true;
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const testName = "initial_sync_invalid_index_spec";
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const testName = "initial_sync_invalid_index_spec";
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- let primaryDB = replTest.getPrimary().getDB(testName);
+let primaryDB = replTest.getPrimary().getDB(testName);
- // Create a V2 index with invalid spec field.
- primaryDB.adminCommand(
- {configureFailPoint: "skipIndexCreateFieldNameValidation", mode: "alwaysOn"});
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: "test", indexes: [{v: 2, name: "x_1", key: {x: 1}, invalidOption: 1}]}));
+// Create a V2 index with invalid spec field.
+primaryDB.adminCommand(
+ {configureFailPoint: "skipIndexCreateFieldNameValidation", mode: "alwaysOn"});
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: "test", indexes: [{v: 2, name: "x_1", key: {x: 1}, invalidOption: 1}]}));
- // Add another node to the replica set to allow an initial sync to occur.
- var initSyncNode = replTest.add();
- var initSyncNodeAdminDB = initSyncNode.getDB("admin");
+// Add another node to the replica set to allow an initial sync to occur.
+var initSyncNode = replTest.add();
+var initSyncNodeAdminDB = initSyncNode.getDB("admin");
- clearRawMongoProgramOutput();
- reInitiateWithoutThrowingOnAbortedMember(replTest);
+clearRawMongoProgramOutput();
+reInitiateWithoutThrowingOnAbortedMember(replTest);
- assert.soon(
- function() {
- try {
- initSyncNodeAdminDB.runCommand({ping: 1});
- } catch (e) {
- return true;
- }
- return false;
- },
- "Node did not terminate due to invalid index spec during initial sync",
- ReplSetTest.kDefaultTimeoutMS);
+assert.soon(
+ function() {
+ try {
+ initSyncNodeAdminDB.runCommand({ping: 1});
+ } catch (e) {
+ return true;
+ }
+ return false;
+ },
+ "Node did not terminate due to invalid index spec during initial sync",
+ ReplSetTest.kDefaultTimeoutMS);
- replTest.stop(initSyncNode, undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
+replTest.stop(initSyncNode, undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
- const msgInvalidOption = "The field 'invalidOption' is not valid for an index specification";
- const msgInitialSyncFatalAssertion = "Fatal assertion 40088 InitialSyncFailure";
+const msgInvalidOption = "The field 'invalidOption' is not valid for an index specification";
+const msgInitialSyncFatalAssertion = "Fatal assertion 40088 InitialSyncFailure";
- assert(rawMongoProgramOutput().match(msgInvalidOption) &&
- rawMongoProgramOutput().match(msgInitialSyncFatalAssertion),
- "Initial sync should have aborted on invalid index specification");
+assert(rawMongoProgramOutput().match(msgInvalidOption) &&
+ rawMongoProgramOutput().match(msgInitialSyncFatalAssertion),
+ "Initial sync should have aborted on invalid index specification");
- replTest.stopSet();
+replTest.stopSet();
})();
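
The assert.soon block above is a crash-detection idiom worth naming: ping the node until the connection dies, which lets the test observe the fatal assertion without racing the process exit. Factored out as a hypothetical helper:

    // Hypothetical helper; `adminDB` is a connection to the node expected to
    // terminate. runCommand throws once the socket is closed.
    function waitUntilNodeDown(adminDB, msg) {
        assert.soon(function() {
            try {
                adminDB.runCommand({ping: 1});
                return false;  // still up, keep polling
            } catch (e) {
                return true;  // connection dropped: the node is down
            }
        }, msg, ReplSetTest.kDefaultTimeoutMS);
    }
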
diff --git a/jstests/replsets/initial_sync_invalid_views.js b/jstests/replsets/initial_sync_invalid_views.js
index 056f7d5694e..a02498aaa40 100644
--- a/jstests/replsets/initial_sync_invalid_views.js
+++ b/jstests/replsets/initial_sync_invalid_views.js
@@ -3,36 +3,36 @@
// views were present. This test ensures that crashes no longer occur in those circumstances.
(function() {
- 'use strict';
+'use strict';
- const name = "initial_sync_invalid_views";
- let replSet = new ReplSetTest({name: name, nodes: 1});
+const name = "initial_sync_invalid_views";
+let replSet = new ReplSetTest({name: name, nodes: 1});
- let oplogSizeOnPrimary = 1; // size in MB
- replSet.startSet({oplogSize: oplogSizeOnPrimary});
- replSet.initiate();
- let primary = replSet.getPrimary();
+let oplogSizeOnPrimary = 1; // size in MB
+replSet.startSet({oplogSize: oplogSizeOnPrimary});
+replSet.initiate();
+let primary = replSet.getPrimary();
- let coll = primary.getDB('test').foo;
- assert.writeOK(coll.insert({a: 1}));
+let coll = primary.getDB('test').foo;
+assert.writeOK(coll.insert({a: 1}));
- // Add a secondary node but make it hang before copying databases.
- let secondary = replSet.add();
- secondary.setSlaveOk();
+// Add a secondary node but make it hang before copying databases.
+let secondary = replSet.add();
+secondary.setSlaveOk();
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
- replSet.reInitiate();
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
+replSet.reInitiate();
- assert.writeOK(primary.getDB('test').system.views.insert({invalid: NumberLong(1000)}));
+assert.writeOK(primary.getDB('test').system.views.insert({invalid: NumberLong(1000)}));
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
- replSet.awaitSecondaryNodes(200 * 1000);
+replSet.awaitSecondaryNodes(200 * 1000);
- // Skip collection validation during stopMongod if invalid views exist.
- TestData.skipValidationOnInvalidViewDefinitions = true;
+// Skip collection validation during stopMongod if invalid views exist.
+TestData.skipValidationOnInvalidViewDefinitions = true;
- replSet.stopSet();
+replSet.stopSet();
})();
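
For contrast with the deliberately malformed document inserted above: on a healthy database a view is created through createView, which writes a system.views entry with _id, viewOn, and pipeline fields (view name below is hypothetical):

    // Produces {_id: "test.myView", viewOn: "foo", pipeline: [{$match: {a: 1}}]}
    // in test.system.views; the {invalid: NumberLong(1000)} document above has
    // none of these fields, which is what used to crash initial sync.
    assert.commandWorked(primary.getDB('test').createView('myView', 'foo', [{$match: {a: 1}}]));
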
diff --git a/jstests/replsets/initial_sync_move_forward.js b/jstests/replsets/initial_sync_move_forward.js
index 070e3243be5..2561e16b0c1 100644
--- a/jstests/replsets/initial_sync_move_forward.js
+++ b/jstests/replsets/initial_sync_move_forward.js
@@ -12,88 +12,87 @@
// This also works for wiredTiger, because we grow the document by deleting and reinserting it, so
// the newly inserted document is included in the cursor on the source.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- var rst = new ReplSetTest({name: "initial_sync_move_forward", nodes: 1});
- rst.startSet();
- rst.initiate();
+var rst = new ReplSetTest({name: "initial_sync_move_forward", nodes: 1});
+rst.startSet();
+rst.initiate();
- var masterColl = rst.getPrimary().getDB("test").coll;
+var masterColl = rst.getPrimary().getDB("test").coll;
- // Insert 500000 documents. Make the last two documents larger, so that {_id: 0, x: 0} and {_id:
- // 1, x: 1} will fit into their positions when we grow them.
- var count = 500000;
- var bulk = masterColl.initializeUnorderedBulkOp();
- for (var i = 0; i < count - 2; ++i) {
- bulk.insert({_id: i, x: i});
- }
- var longString = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
- bulk.insert({_id: count - 2, x: count - 2, longString: longString});
- bulk.insert({_id: count - 1, x: count - 1, longString: longString});
- assert.writeOK(bulk.execute());
+// Insert 500000 documents. Make the last two documents larger, so that {_id: 0, x: 0} and {_id:
+// 1, x: 1} will fit into their positions when we grow them.
+var count = 500000;
+var bulk = masterColl.initializeUnorderedBulkOp();
+for (var i = 0; i < count - 2; ++i) {
+ bulk.insert({_id: i, x: i});
+}
+var longString = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+bulk.insert({_id: count - 2, x: count - 2, longString: longString});
+bulk.insert({_id: count - 1, x: count - 1, longString: longString});
+assert.writeOK(bulk.execute());
- // Create a unique index on {x: 1}.
- assert.commandWorked(masterColl.ensureIndex({x: 1}, {unique: true}));
+// Create a unique index on {x: 1}.
+assert.commandWorked(masterColl.ensureIndex({x: 1}, {unique: true}));
- // Add a secondary.
- var secondary = rst.add({setParameter: "numInitialSyncAttempts=1"});
- secondary.setSlaveOk();
- var secondaryColl = secondary.getDB("test").coll;
+// Add a secondary.
+var secondary = rst.add({setParameter: "numInitialSyncAttempts=1"});
+secondary.setSlaveOk();
+var secondaryColl = secondary.getDB("test").coll;
- // Pause initial sync when the secondary has copied {_id: 0, x: 0} and {_id: 1, x: 1}.
- assert.commandWorked(secondary.adminCommand({
- configureFailPoint: "initialSyncHangDuringCollectionClone",
- data: {namespace: secondaryColl.getFullName(), numDocsToClone: 2},
- mode: "alwaysOn"
- }));
- rst.reInitiate();
- assert.soon(function() {
- var logMessages = assert.commandWorked(secondary.adminCommand({getLog: "global"})).log;
- for (var i = 0; i < logMessages.length; i++) {
- if (logMessages[i].indexOf(
- "initial sync - initialSyncHangDuringCollectionClone fail point enabled") !=
- -1) {
- return true;
- }
+// Pause initial sync when the secondary has copied {_id: 0, x: 0} and {_id: 1, x: 1}.
+assert.commandWorked(secondary.adminCommand({
+ configureFailPoint: "initialSyncHangDuringCollectionClone",
+ data: {namespace: secondaryColl.getFullName(), numDocsToClone: 2},
+ mode: "alwaysOn"
+}));
+rst.reInitiate();
+assert.soon(function() {
+ var logMessages = assert.commandWorked(secondary.adminCommand({getLog: "global"})).log;
+ for (var i = 0; i < logMessages.length; i++) {
+ if (logMessages[i].indexOf(
+ "initial sync - initialSyncHangDuringCollectionClone fail point enabled") != -1) {
+ return true;
}
- return false;
- });
+ }
+ return false;
+});
- // Delete {_id: count - 2} to make a hole. Grow {_id: 0} so that it moves into that hole. This
- // will cause the secondary to clone {_id: 0} again.
- // Change the value for 'x' so that we are not testing the uniqueness of 'x' in this case.
- assert.writeOK(masterColl.remove({_id: 0, x: 0}));
- assert.writeOK(masterColl.remove({_id: count - 2, x: count - 2}));
- assert.writeOK(masterColl.insert({_id: 0, x: count, longString: longString}));
+// Delete {_id: count - 2} to make a hole. Grow {_id: 0} so that it moves into that hole. This
+// will cause the secondary to clone {_id: 0} again.
+// Change the value for 'x' so that we are not testing the uniqueness of 'x' in this case.
+assert.writeOK(masterColl.remove({_id: 0, x: 0}));
+assert.writeOK(masterColl.remove({_id: count - 2, x: count - 2}));
+assert.writeOK(masterColl.insert({_id: 0, x: count, longString: longString}));
- // Delete {_id: count - 1} to make a hole. Grow {x: 1} so that it moves into that hole. This
- // will cause the secondary to clone {x: 1} again.
- // Change the value for _id so that we are not testing the uniqueness of _id in this case.
- assert.writeOK(masterColl.remove({_id: 1, x: 1}));
- assert.writeOK(masterColl.remove({_id: count - 1, x: count - 1}));
- assert.writeOK(masterColl.insert({_id: count, x: 1, longString: longString}));
+// Delete {_id: count - 1} to make a hole. Grow {x: 1} so that it moves into that hole. This
+// will cause the secondary to clone {x: 1} again.
+// Change the value for _id so that we are not testing the uniqueness of _id in this case.
+assert.writeOK(masterColl.remove({_id: 1, x: 1}));
+assert.writeOK(masterColl.remove({_id: count - 1, x: count - 1}));
+assert.writeOK(masterColl.insert({_id: count, x: 1, longString: longString}));
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
- // Wait for initial sync to finish.
- rst.awaitSecondaryNodes();
+// Wait for initial sync to finish.
+rst.awaitSecondaryNodes();
- // Check document count on secondary.
- assert.eq(count - 2, secondaryColl.find().itcount());
+// Check document count on secondary.
+assert.eq(count - 2, secondaryColl.find().itcount());
- // Check for {_id: 0} on secondary.
- assert.eq(1, secondaryColl.find({_id: 0, x: count}).itcount());
+// Check for {_id: 0} on secondary.
+assert.eq(1, secondaryColl.find({_id: 0, x: count}).itcount());
- // Check for {x: 1} on secondary.
- assert.eq(1, secondaryColl.find({_id: count, x: 1}).itcount());
+// Check for {x: 1} on secondary.
+assert.eq(1, secondaryColl.find({_id: count, x: 1}).itcount());
- // Check for unique index on secondary.
- var indexSpec = GetIndexHelpers.findByKeyPattern(secondaryColl.getIndexes(), {x: 1});
- assert.neq(null, indexSpec);
- assert.eq(true, indexSpec.unique);
- rst.stopSet();
+// Check for unique index on secondary.
+var indexSpec = GetIndexHelpers.findByKeyPattern(secondaryColl.getIndexes(), {x: 1});
+assert.neq(null, indexSpec);
+assert.eq(true, indexSpec.unique);
+rst.stopSet();
})();
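
The delete-then-reinsert pattern the comments above describe can be read as a single "grow into a hole" idiom: removing a document frees its record, and reinserting a larger version lets it land in a hole freed elsewhere, moving it forward past the cloner's cursor. As a hypothetical helper:

    // growDocument(masterColl, {_id: 0, x: 0}, {_id: 0, x: count, longString: longString})
    // would express the first move performed above.
    function growDocument(coll, query, biggerDoc) {
        assert.writeOK(coll.remove(query));       // frees the old record
        assert.writeOK(coll.insert(biggerDoc));   // larger doc lands in a freed hole
    }
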
diff --git a/jstests/replsets/initial_sync_oplog_hole.js b/jstests/replsets/initial_sync_oplog_hole.js
index a6805102120..190099cd571 100644
--- a/jstests/replsets/initial_sync_oplog_hole.js
+++ b/jstests/replsets/initial_sync_oplog_hole.js
@@ -4,96 +4,95 @@
* @tags: [requires_document_locking]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
- load("jstests/replsets/rslib.js");
+load("jstests/libs/check_log.js");
+load("jstests/replsets/rslib.js");
- // Set up replica set. Disallow chaining so nodes always sync from primary.
- const testName = "initial_sync_oplog_hole";
- const dbName = testName;
- // Set up a three-node replset. The first node is primary throughout the test, the second node
- // is used as the initial sync node, and the third node is to ensure we maintain a majority (and
- // thus no election) while restarting the second.
- const replTest = new ReplSetTest({
- name: testName,
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- replTest.startSet();
- replTest.initiate();
+// Set up replica set. Disallow chaining so nodes always sync from primary.
+const testName = "initial_sync_oplog_hole";
+const dbName = testName;
+// Set up a three-node replset. The first node is primary throughout the test, the second node
+// is used as the initial sync node, and the third node is to ensure we maintain a majority (and
+// thus no election) while restarting the second.
+const replTest = new ReplSetTest({
+ name: testName,
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const secondary = replTest.getSecondary();
- const secondaryDB = secondary.getDB(dbName);
- const collName = "testcoll";
- const primaryColl = primaryDB[collName];
- const secondaryColl = secondaryDB[collName];
- const nss = primaryColl.getFullName();
- TestData.testName = testName;
- TestData.collectionName = collName;
+const primary = replTest.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const secondary = replTest.getSecondary();
+const secondaryDB = secondary.getDB(dbName);
+const collName = "testcoll";
+const primaryColl = primaryDB[collName];
+const secondaryColl = secondaryDB[collName];
+const nss = primaryColl.getFullName();
+TestData.testName = testName;
+TestData.collectionName = collName;
- jsTestLog("Writing data before oplog hole to collection.");
- assert.writeOK(primaryColl.insert({_id: "a"}));
- // Make sure it gets written out.
- assert.eq(primaryColl.find({_id: "a"}).itcount(), 1);
+jsTestLog("Writing data before oplog hole to collection.");
+assert.writeOK(primaryColl.insert({_id: "a"}));
+// Make sure it gets written out.
+assert.eq(primaryColl.find({_id: "a"}).itcount(), 1);
- jsTest.log("Create the uncommitted write.");
- assert.commandWorked(primaryDB.adminCommand({
- configureFailPoint: "hangAfterCollectionInserts",
- mode: "alwaysOn",
- data: {collectionNS: primaryColl.getFullName(), first_id: "b"}
- }));
+jsTest.log("Create the uncommitted write.");
+assert.commandWorked(primaryDB.adminCommand({
+ configureFailPoint: "hangAfterCollectionInserts",
+ mode: "alwaysOn",
+ data: {collectionNS: primaryColl.getFullName(), first_id: "b"}
+}));
- const db = primaryDB;
- const joinHungWrite = startParallelShell(() => {
- assert.commandWorked(
- db.getSiblingDB(TestData.testName)[TestData.collectionName].insert({_id: "b"}));
- }, primary.port);
- checkLog.contains(
- primaryDB.getMongo(),
- "hangAfterCollectionInserts fail point enabled for " + primaryColl.getFullName());
+const db = primaryDB;
+const joinHungWrite = startParallelShell(() => {
+ assert.commandWorked(
+ db.getSiblingDB(TestData.testName)[TestData.collectionName].insert({_id: "b"}));
+}, primary.port);
+checkLog.contains(primaryDB.getMongo(),
+ "hangAfterCollectionInserts fail point enabled for " + primaryColl.getFullName());
- jsTest.log("Create a write following the uncommitted write.");
- assert.writeOK(primaryColl.insert({_id: "c"}));
- // Make sure it gets written out.
- assert.eq(primaryColl.find({_id: "c"}).itcount(), 1);
+jsTest.log("Create a write following the uncommitted write.");
+assert.writeOK(primaryColl.insert({_id: "c"}));
+// Make sure it gets written out.
+assert.eq(primaryColl.find({_id: "c"}).itcount(), 1);
- jsTestLog("Restarting initial sync node.");
- replTest.restart(secondary, {startClean: true});
- jsTestLog("Waiting for initial sync to start.");
- checkLog.contains(secondaryDB.getMongo(), "Starting initial sync");
+jsTestLog("Restarting initial sync node.");
+replTest.restart(secondary, {startClean: true});
+jsTestLog("Waiting for initial sync to start.");
+checkLog.contains(secondaryDB.getMongo(), "Starting initial sync");
- // The 5 seconds is because in the non-buggy case, we'll be hung waiting for the optime to
- // advance. However, if we allow the write to finish immediately, we are likely to miss the
- // race if it happens. By allowing 5 seconds, we'll never fail when we should succeed, and
- // we'll nearly always fail when we should fail.
- //
- // Once the hangAfterCollectionInserts failpoint is turned off, the write of {_id: "b"} will
- // complete and both the data and the oplog entry for the write will be written out. The oplog
- // visibility thread will then close the oplog hole.
- jsTestLog("Allow the uncommitted write to finish in 5 seconds.");
- const joinDisableFailPoint = startParallelShell(() => {
- sleep(5000);
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "hangAfterCollectionInserts", mode: "off"}));
- }, primary.port);
+// The 5 seconds is because in the non-buggy case, we'll be hung waiting for the optime to
+// advance. However, if we allow the write to finish immediately, we are likely to miss the
+// race if it happens. By allowing 5 seconds, we'll never fail when we should succeed, and
+// we'll nearly always fail when we should fail.
+//
+// Once the hangAfterCollectionInserts failpoint is turned off, the write of {_id: "b"} will
+// complete and both the data and the oplog entry for the write will be written out. The oplog
+// visibility thread will then close the oplog hole.
+jsTestLog("Allow the uncommitted write to finish in 5 seconds.");
+const joinDisableFailPoint = startParallelShell(() => {
+ sleep(5000);
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "hangAfterCollectionInserts", mode: "off"}));
+}, primary.port);
- jsTestLog("Waiting for initial sync to complete.");
- waitForState(secondary, ReplSetTest.State.SECONDARY);
+jsTestLog("Waiting for initial sync to complete.");
+waitForState(secondary, ReplSetTest.State.SECONDARY);
- jsTestLog("Joining hung write");
- joinDisableFailPoint();
- joinHungWrite();
+jsTestLog("Joining hung write");
+joinDisableFailPoint();
+joinHungWrite();
- jsTestLog("Checking that primary has all data items.");
- // Make sure the primary collection has all three data items.
- assert.docEq(primaryColl.find().toArray(), [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]);
+jsTestLog("Checking that primary has all data items.");
+// Make sure the primary collection has all three data items.
+assert.docEq(primaryColl.find().toArray(), [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]);
- jsTestLog("Checking that secondary has all data items.");
- replTest.awaitReplication();
- assert.docEq(secondaryColl.find().toArray(), [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]);
+jsTestLog("Checking that secondary has all data items.");
+replTest.awaitReplication();
+assert.docEq(secondaryColl.find().toArray(), [{"_id": "a"}, {"_id": "b"}, {"_id": "c"}]);
- replTest.stopSet();
+replTest.stopSet();
})();
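
A sketch of the timeline this test engineers may help; timestamps are illustrative:

    // ts1  {_id: "a"}  committed and visible
    // ts2  {_id: "b"}  oplog slot allocated, write hung on the failpoint  <-- the hole
    // ts3  {_id: "c"}  committed, but must not become visible while ts2 is open
    //
    // The oplog visibility point holds at ts1 until the hung write completes,
    // so a correctly behaving sync source never serves ts3 without ts2, and
    // the initial syncing node sees no gap.
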
diff --git a/jstests/replsets/initial_sync_oplog_rollover.js b/jstests/replsets/initial_sync_oplog_rollover.js
index ba1da0f14a8..7ffe8c98dd4 100644
--- a/jstests/replsets/initial_sync_oplog_rollover.js
+++ b/jstests/replsets/initial_sync_oplog_rollover.js
@@ -8,66 +8,65 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/libs/check_log.js");
- var name = 'initial_sync_oplog_rollover';
- var replSet = new ReplSetTest({
- name: name,
- // This test requires a third node (added later) to be syncing when the oplog rolls
- // over. Rolling over the oplog requires a majority of nodes to have confirmed and
- // persisted those writes. Set the syncdelay to one to speed up checkpointing.
- nodeOptions: {syncdelay: 1},
- nodes: [
- {rsConfig: {priority: 1}},
- {rsConfig: {priority: 0}},
- ],
- });
+var name = 'initial_sync_oplog_rollover';
+var replSet = new ReplSetTest({
+ name: name,
+ // This test requires a third node (added later) to be syncing when the oplog rolls
+ // over. Rolling over the oplog requires a majority of nodes to have confirmed and
+ // persisted those writes. Set the syncdelay to one to speed up checkpointing.
+ nodeOptions: {syncdelay: 1},
+ nodes: [
+ {rsConfig: {priority: 1}},
+ {rsConfig: {priority: 0}},
+ ],
+});
- var oplogSizeOnPrimary = 1; // size in MB
- replSet.startSet({oplogSize: oplogSizeOnPrimary});
- replSet.initiate();
- var primary = replSet.getPrimary();
+var oplogSizeOnPrimary = 1; // size in MB
+replSet.startSet({oplogSize: oplogSizeOnPrimary});
+replSet.initiate();
+var primary = replSet.getPrimary();
- var coll = primary.getDB('test').foo;
- assert.writeOK(coll.insert({a: 1}));
+var coll = primary.getDB('test').foo;
+assert.writeOK(coll.insert({a: 1}));
- function getFirstOplogEntry(conn) {
- return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0];
- }
+function getFirstOplogEntry(conn) {
+ return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0];
+}
- var firstOplogEntry = getFirstOplogEntry(primary);
+var firstOplogEntry = getFirstOplogEntry(primary);
- // Add a secondary node but make it hang before copying databases.
- var secondary = replSet.add();
- secondary.setSlaveOk();
+// Add a secondary node but make it hang before copying databases.
+var secondary = replSet.add();
+secondary.setSlaveOk();
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
- replSet.reInitiate();
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
+replSet.reInitiate();
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
- // Keep inserting large documents until they roll over the oplog.
- const largeStr = new Array(4 * 1024 * oplogSizeOnPrimary).join('aaaaaaaa');
- var i = 0;
- while (bsonWoCompare(getFirstOplogEntry(primary), firstOplogEntry) === 0) {
- assert.writeOK(coll.insert({a: 2, x: i++, long_str: largeStr}));
- sleep(100);
- }
+// Keep inserting large documents until they roll over the oplog.
+const largeStr = new Array(4 * 1024 * oplogSizeOnPrimary).join('aaaaaaaa');
+var i = 0;
+while (bsonWoCompare(getFirstOplogEntry(primary), firstOplogEntry) === 0) {
+ assert.writeOK(coll.insert({a: 2, x: i++, long_str: largeStr}));
+ sleep(100);
+}
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
- replSet.awaitSecondaryNodes(200 * 1000);
+replSet.awaitSecondaryNodes(200 * 1000);
- assert.eq(i,
- secondary.getDB('test').foo.count({a: 2}),
- 'collection successfully synced to secondary');
+assert.eq(
+ i, secondary.getDB('test').foo.count({a: 2}), 'collection successfully synced to secondary');
- assert.eq(0,
- secondary.getDB('local')['temp_oplog_buffer'].find().itcount(),
- "Oplog buffer was not dropped after initial sync");
- replSet.stopSet();
+assert.eq(0,
+ secondary.getDB('local')['temp_oplog_buffer'].find().itcount(),
+ "Oplog buffer was not dropped after initial sync");
+replSet.stopSet();
})();
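
The filler sizing above is worth unpacking (with oplogSizeOnPrimary = 1):

    // new Array(4 * 1024).join('aaaaaaaa') concatenates 4095 eight-character
    // separators, i.e. 32,760 bytes per document, so roughly 32 inserts per
    // megabyte of oplog -- enough for the while-loop to roll the 1 MB oplog
    // within a few seconds despite the 100 ms sleeps.
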
diff --git a/jstests/replsets/initial_sync_preserves_active_txns.js b/jstests/replsets/initial_sync_preserves_active_txns.js
index 560b781a6e1..e037c1c29de 100644
--- a/jstests/replsets/initial_sync_preserves_active_txns.js
+++ b/jstests/replsets/initial_sync_preserves_active_txns.js
@@ -11,92 +11,91 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- // A new replica set for both the commit and abort tests to ensure the same clean state.
- function doTest(commitOrAbort) {
- const replSet = new ReplSetTest({
- oplogSize: PrepareHelpers.oplogSizeMB,
- // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
- nodeOptions:
- {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
- nodes: 1
- });
+// A new replica set for both the commit and abort tests to ensure the same clean state.
+function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ oplogSize: PrepareHelpers.oplogSizeMB,
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: 1
+ });
- replSet.startSet(PrepareHelpers.replSetStartSetOptions);
- replSet.initiate();
- const primary = replSet.getPrimary();
- const primaryOplog = primary.getDB("local").oplog.rs;
- assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
- const coll = primary.getDB("test").test;
- assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
- jsTestLog("Prepare a transaction");
+ jsTestLog("Prepare a transaction");
- const session = primary.startSession();
- session.startTransaction();
- assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- const txnEntry = primary.getDB("config").transactions.findOne();
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ const txnEntry = primary.getDB("config").transactions.findOne();
- const oldestRequiredTimestampForCrashRecovery =
- PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
- assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
+ const oldestRequiredTimestampForCrashRecovery =
+ PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
+ assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
- // Make sure that the timestamp of the first oplog entry for this transaction matches the
- // start timestamp in the transactions table.
- let oplog = primary.getDB("local").getCollection("oplog.rs");
- const txnNum = session.getTxnNumber_forTesting();
- const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session.getSessionId().id});
- assert.neq(op, null);
- const firstTxnOpTs = op.ts;
- assert.eq(txnEntry.startOpTime.ts, firstTxnOpTs, tojson(txnEntry));
+ // Make sure that the timestamp of the first oplog entry for this transaction matches the
+ // start timestamp in the transactions table.
+ let oplog = primary.getDB("local").getCollection("oplog.rs");
+ const txnNum = session.getTxnNumber_forTesting();
+ const op = oplog.findOne({"txnNumber": txnNum, "lsid.id": session.getSessionId().id});
+ assert.neq(op, null);
+ const firstTxnOpTs = op.ts;
+ assert.eq(txnEntry.startOpTime.ts, firstTxnOpTs, tojson(txnEntry));
- jsTestLog("Insert documents until oplog exceeds oplogSize");
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
- // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
- PrepareHelpers.growOplogPastMaxSize(replSet);
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
- jsTestLog("Make sure the transaction's first entry is still in the oplog");
+ jsTestLog("Make sure the transaction's first entry is still in the oplog");
- assert.eq(primaryOplog.find({ts: firstTxnOpTs}).itcount(), 1);
+ assert.eq(primaryOplog.find({ts: firstTxnOpTs}).itcount(), 1);
- jsTestLog("Add a secondary node");
+ jsTestLog("Add a secondary node");
- const secondary = replSet.add({rsConfig: {votes: 0, priority: 0}});
- replSet.reInitiate();
+ const secondary = replSet.add({rsConfig: {votes: 0, priority: 0}});
+ replSet.reInitiate();
- jsTestLog("Reinitiated, awaiting secondary node");
+ jsTestLog("Reinitiated, awaiting secondary node");
- replSet.awaitSecondaryNodes();
+ replSet.awaitSecondaryNodes();
- jsTestLog("Checking secondary oplog and config.transactions");
+ jsTestLog("Checking secondary oplog and config.transactions");
- // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
- const secondaryOplog = secondary.getDB("local").oplog.rs;
- assert.gt(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
- assert.eq(secondaryOplog.find({ts: firstTxnOpTs}).itcount(), 1);
+ // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.gt(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ assert.eq(secondaryOplog.find({ts: firstTxnOpTs}).itcount(), 1);
- const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
- assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
+ const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
+ assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
- if (commitOrAbort === "commit") {
- jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- } else if (commitOrAbort === "abort") {
- jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
- assert.commandWorked(session.abortTransaction_forTesting());
- } else {
- throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
- }
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ assert.commandWorked(session.abortTransaction_forTesting());
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
+ }
- replSet.awaitReplication();
+ replSet.awaitReplication();
- PrepareHelpers.awaitOplogTruncation(replSet);
- replSet.stopSet();
- }
- doTest("commit");
- doTest("abort");
+ PrepareHelpers.awaitOplogTruncation(replSet);
+ replSet.stopSet();
+}
+doTest("commit");
+doTest("abort");
})();
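
PrepareHelpers.growOplogPastMaxSize, in spirit (a simplified sketch, not the helper's actual source): keep writing filler until the oplog reports a data size beyond its configured cap, which works here only because the prepared transaction pins the oldest entry and blocks truncation.

    function growOplogPastMaxSizeSketch(replSet) {
        const primary = replSet.getPrimary();
        const oplog = primary.getDB("local").oplog.rs;
        const filler = new Array(1024).join("x");  // ~1 KB of padding per insert
        while (oplog.dataSize() <= PrepareHelpers.oplogSizeBytes) {
            assert.commandWorked(primary.getDB("test").filler.insert({s: filler}));
        }
    }
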
diff --git a/jstests/replsets/initial_sync_read_concern_no_oplog.js b/jstests/replsets/initial_sync_read_concern_no_oplog.js
index e52ac1faa06..6ad3974cea3 100644
--- a/jstests/replsets/initial_sync_read_concern_no_oplog.js
+++ b/jstests/replsets/initial_sync_read_concern_no_oplog.js
@@ -1,33 +1,33 @@
// Test that if an afterClusterTime query is issued to a node in initial sync that has not yet
// created its oplog, the node returns an error rather than crashing.
(function() {
- 'use strict';
- load('jstests/libs/check_log.js');
+'use strict';
+load('jstests/libs/check_log.js');
- const replSet = new ReplSetTest({nodes: 1});
+const replSet = new ReplSetTest({nodes: 1});
- replSet.startSet();
- replSet.initiate();
- const primary = replSet.getPrimary();
- const secondary = replSet.add();
+replSet.startSet();
+replSet.initiate();
+const primary = replSet.getPrimary();
+const secondary = replSet.add();
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeCreatingOplog', mode: 'alwaysOn'}));
- replSet.reInitiate();
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCreatingOplog', mode: 'alwaysOn'}));
+replSet.reInitiate();
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCreatingOplog fail point enabled');
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCreatingOplog fail point enabled');
- assert.commandFailedWithCode(
- secondary.getDB('local').runCommand(
- {find: 'coll', limit: 1, readConcern: {afterClusterTime: Timestamp(1, 1)}}),
- ErrorCodes.NotYetInitialized);
+assert.commandFailedWithCode(
+ secondary.getDB('local').runCommand(
+ {find: 'coll', limit: 1, readConcern: {afterClusterTime: Timestamp(1, 1)}}),
+ ErrorCodes.NotYetInitialized);
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeCreatingOplog', mode: 'off'}));
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCreatingOplog', mode: 'off'}));
- replSet.awaitReplication();
- replSet.awaitSecondaryNodes();
+replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
- replSet.stopSet();
+replSet.stopSet();
 })();
\ No newline at end of file
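
For contrast, the same read is legal on a node that has an oplog: afterClusterTime just makes the node wait until its view of the data reflects at least that cluster time. Illustrative only, since Timestamp(1, 1) is trivially in the past:

    assert.commandWorked(primary.getDB('test').runCommand(
        {find: 'coll', limit: 1, readConcern: {afterClusterTime: Timestamp(1, 1)}}));
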
diff --git a/jstests/replsets/initial_sync_rename_collection.js b/jstests/replsets/initial_sync_rename_collection.js
index c4281704dec..9c63d7c55b1 100644
--- a/jstests/replsets/initial_sync_rename_collection.js
+++ b/jstests/replsets/initial_sync_rename_collection.js
@@ -5,101 +5,99 @@
*/
(function() {
- 'use strict';
-
- load('jstests/replsets/rslib.js');
- const basename = 'initial_sync_rename_collection';
-
- jsTestLog('Bring up a replica set');
- const rst = new ReplSetTest({name: basename, nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const db0_name = "db0";
- const db1_name = "db1";
-
- const primary = rst.getPrimary();
-
- // Create two separate databases so that we can rename a collection across databases.
- const primary_db0 = primary.getDB(db0_name);
- const primary_db1 = primary.getDB(db1_name);
-
- jsTestLog("Create collections on primary");
- const collRenameWithinDB_name = 'coll_1';
- const collRenameAcrossDBs_name = 'coll_2';
- const collWithinFinal_name = 'renamed';
- const collAcrossFinal_name = 'renamed_across';
-
- // Create two collections on the same database. One will be renamed within the database
- // and the other will be renamed to a different database.
- assert.writeOK(primary_db0[collRenameWithinDB_name].save({}));
- assert.writeOK(primary_db0[collRenameAcrossDBs_name].save({}));
-
- jsTestLog('Waiting for replication');
- rst.awaitReplication();
-
- jsTestLog('Bring up a new node');
- const secondary = rst.add({setParameter: 'numInitialSyncAttempts=1'});
-
- // Add a fail point that causes the secondary's initial sync to hang before
- // copying databases.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
-
- jsTestLog('Begin initial sync on secondary');
- let conf = rst.getPrimary().getDB('admin').runCommand({replSetGetConfig: 1}).config;
- conf.members.push({_id: 1, host: secondary.host, priority: 0, votes: 0});
- conf.version++;
- assert.commandWorked(rst.getPrimary().getDB('admin').runCommand({replSetReconfig: conf}));
- assert.eq(primary, rst.getPrimary(), 'Primary changed after reconfig');
-
- // Confirm that initial sync started on the secondary node.
- jsTestLog('Waiting for initial sync to start');
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
-
- // Start renaming collections while initial sync is hanging.
- jsTestLog('Rename collection ' + db0_name + '.' + collRenameWithinDB_name + ' to ' + db0_name +
- '.' + collWithinFinal_name + ' on the sync source ' + db0_name);
- assert.commandWorked(
- primary_db0[collRenameWithinDB_name].renameCollection(collWithinFinal_name));
-
- jsTestLog('Rename collection ' + db0_name + '.' + collRenameAcrossDBs_name + ' to ' + db1_name +
- '.' + collAcrossFinal_name + ' on the sync source ' + db0_name);
- assert.commandWorked(primary.adminCommand({
- renameCollection: primary_db0[collRenameAcrossDBs_name].getFullName(),
- to: primary_db1[collAcrossFinal_name]
- .getFullName() // Collection 'renamed_across' is implicitly created.
- }));
-
- // Disable fail point so that the secondary can finish its initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
-
- jsTestLog('Wait for both nodes to be up-to-date');
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
-
- const secondary_db0 = secondary.getDB(db0_name);
- const secondary_db1 = secondary.getDB(db1_name);
-
- jsTestLog('Check that collection was renamed correctly on the secondary');
- assert.eq(secondary_db0[collWithinFinal_name].find().itcount(),
- 1,
- 'renamed collection does not exist');
- assert.eq(secondary_db1[collAcrossFinal_name].find().itcount(),
- 1,
- 'renamed_across collection does not exist');
- assert.eq(secondary_db0[collRenameWithinDB_name].find().itcount(),
- 0,
- 'collection ' + collRenameWithinDB_name +
- ' still exists after it was supposed to be renamed');
- assert.eq(secondary_db0[collRenameAcrossDBs_name].find().itcount(),
- 0,
- 'collection ' + collRenameAcrossDBs_name +
- ' still exists after it was supposed to be renamed');
-
- rst.checkReplicatedDataHashes();
- rst.checkOplogs();
- rst.stopSet();
+'use strict';
+
+load('jstests/replsets/rslib.js');
+const basename = 'initial_sync_rename_collection';
+
+jsTestLog('Bring up a replica set');
+const rst = new ReplSetTest({name: basename, nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const db0_name = "db0";
+const db1_name = "db1";
+
+const primary = rst.getPrimary();
+
+// Create two separate databases so that we can rename a collection across databases.
+const primary_db0 = primary.getDB(db0_name);
+const primary_db1 = primary.getDB(db1_name);
+
+jsTestLog("Create collections on primary");
+const collRenameWithinDB_name = 'coll_1';
+const collRenameAcrossDBs_name = 'coll_2';
+const collWithinFinal_name = 'renamed';
+const collAcrossFinal_name = 'renamed_across';
+
+// Create two collections on the same database. One will be renamed within the database
+// and the other will be renamed to a different database.
+assert.writeOK(primary_db0[collRenameWithinDB_name].save({}));
+assert.writeOK(primary_db0[collRenameAcrossDBs_name].save({}));
+
+jsTestLog('Waiting for replication');
+rst.awaitReplication();
+
+jsTestLog('Bring up a new node');
+const secondary = rst.add({setParameter: 'numInitialSyncAttempts=1'});
+
+// Add a fail point that causes the secondary's initial sync to hang before
+// copying databases.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
+
+jsTestLog('Begin initial sync on secondary');
+let conf = rst.getPrimary().getDB('admin').runCommand({replSetGetConfig: 1}).config;
+conf.members.push({_id: 1, host: secondary.host, priority: 0, votes: 0});
+conf.version++;
+assert.commandWorked(rst.getPrimary().getDB('admin').runCommand({replSetReconfig: conf}));
+assert.eq(primary, rst.getPrimary(), 'Primary changed after reconfig');
+
+// Confirm that initial sync started on the secondary node.
+jsTestLog('Waiting for initial sync to start');
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+
+// Start renaming collections while initial sync is hanging.
+jsTestLog('Rename collection ' + db0_name + '.' + collRenameWithinDB_name + ' to ' + db0_name +
+ '.' + collWithinFinal_name + ' on the sync source ' + db0_name);
+assert.commandWorked(primary_db0[collRenameWithinDB_name].renameCollection(collWithinFinal_name));
+
+jsTestLog('Rename collection ' + db0_name + '.' + collRenameAcrossDBs_name + ' to ' + db1_name +
+ '.' + collAcrossFinal_name + ' on the sync source ' + db0_name);
+assert.commandWorked(primary.adminCommand({
+ renameCollection: primary_db0[collRenameAcrossDBs_name].getFullName(),
+ to: primary_db1[collAcrossFinal_name]
+ .getFullName() // Collection 'renamed_across' is implicitly created.
+}));
+
+// Disable fail point so that the secondary can finish its initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+
+jsTestLog('Wait for both nodes to be up-to-date');
+rst.awaitSecondaryNodes();
+rst.awaitReplication();
+
+const secondary_db0 = secondary.getDB(db0_name);
+const secondary_db1 = secondary.getDB(db1_name);
+
+jsTestLog('Check that collection was renamed correctly on the secondary');
+assert.eq(
+ secondary_db0[collWithinFinal_name].find().itcount(), 1, 'renamed collection does not exist');
+assert.eq(secondary_db1[collAcrossFinal_name].find().itcount(),
+ 1,
+ 'renamed_across collection does not exist');
+assert.eq(
+ secondary_db0[collRenameWithinDB_name].find().itcount(),
+ 0,
+ 'collection ' + collRenameWithinDB_name + ' still exists after it was supposed to be renamed');
+assert.eq(
+ secondary_db0[collRenameAcrossDBs_name].find().itcount(),
+ 0,
+ 'collection ' + collRenameAcrossDBs_name + ' still exists after it was supposed to be renamed');
+
+rst.checkReplicatedDataHashes();
+rst.checkOplogs();
+rst.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_replSetGetStatus.js b/jstests/replsets/initial_sync_replSetGetStatus.js
index 2d6bf11829b..60fd36a9c77 100644
--- a/jstests/replsets/initial_sync_replSetGetStatus.js
+++ b/jstests/replsets/initial_sync_replSetGetStatus.js
@@ -4,90 +4,90 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
-
- var name = 'initial_sync_replSetGetStatus';
- var replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
-
- replSet.startSet();
- replSet.initiate();
- var primary = replSet.getPrimary();
-
- var coll = primary.getDB('test').foo;
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.insert({a: 2}));
-
- // Add a secondary node but make it hang before copying databases.
- var secondary = replSet.add();
- secondary.setSlaveOk();
-
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeFinish', mode: 'alwaysOn'}));
- replSet.reInitiate();
-
- // Wait for initial sync to pause before it copies the databases.
- checkLog.contains(secondary,
- 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
-
- // Test that replSetGetStatus returns the correct results while initial sync is in progress.
- var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert(res.initialSyncStatus,
- () => "Response should have an 'initialSyncStatus' field: " + tojson(res));
-
- res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 0}));
- assert(!res.initialSyncStatus,
- () => "Response should not have an 'initialSyncStatus' field: " + tojson(res));
-
- assert.commandFailedWithCode(secondary.adminCommand({replSetGetStatus: 1, initialSync: "t"}),
- ErrorCodes.TypeMismatch);
-
- assert.writeOK(coll.insert({a: 3}));
- assert.writeOK(coll.insert({a: 4}));
-
- // Let initial sync continue working.
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
-
- // Wait for initial sync to pause right before it finishes.
- checkLog.contains(secondary, 'initial sync - initialSyncHangBeforeFinish fail point enabled');
-
- // Test that replSetGetStatus returns the correct results when initial sync is at the very end.
- res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert(res.initialSyncStatus,
- () => "Response should have an 'initialSyncStatus' field: " + tojson(res));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- assert.eq(res.initialSyncStatus.appliedOps, 3);
- assert.eq(res.initialSyncStatus.failedInitialSyncAttempts, 0);
- assert.eq(res.initialSyncStatus.maxFailedInitialSyncAttempts, 10);
- assert.eq(res.initialSyncStatus.databases.databasesCloned, 3);
- assert.eq(res.initialSyncStatus.databases.test.collections, 1);
- assert.eq(res.initialSyncStatus.databases.test.clonedCollections, 1);
- assert.eq(res.initialSyncStatus.databases.test["test.foo"].documentsToCopy, 4);
- assert.eq(res.initialSyncStatus.databases.test["test.foo"].documentsCopied, 4);
- assert.eq(res.initialSyncStatus.databases.test["test.foo"].indexes, 1);
- assert.eq(res.initialSyncStatus.databases.test["test.foo"].fetchedBatches, 1);
-
- // Let initial sync finish and get into secondary state.
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'initialSyncHangBeforeFinish', mode: 'off'}));
- replSet.awaitSecondaryNodes(60 * 1000);
-
- // Test that replSetGetStatus returns the correct results after initial sync is finished.
- res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert(!res.initialSyncStatus,
- () => "Response should not have an 'initialSyncStatus' field: " + tojson(res));
-
- assert.commandFailedWithCode(secondary.adminCommand({replSetGetStatus: 1, initialSync: "m"}),
- ErrorCodes.TypeMismatch);
- assert.eq(0,
- secondary.getDB('local')['temp_oplog_buffer'].find().itcount(),
- "Oplog buffer was not dropped after initial sync");
-
- replSet.stopSet();
+"use strict";
+load("jstests/libs/check_log.js");
+
+var name = 'initial_sync_replSetGetStatus';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+});
+
+replSet.startSet();
+replSet.initiate();
+var primary = replSet.getPrimary();
+
+var coll = primary.getDB('test').foo;
+assert.writeOK(coll.insert({a: 1}));
+assert.writeOK(coll.insert({a: 2}));
+
+// Add a secondary node but make it hang before copying databases.
+var secondary = replSet.add();
+secondary.setSlaveOk();
+
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeFinish', mode: 'alwaysOn'}));
+replSet.reInitiate();
+
+// Wait for initial sync to pause before it copies the databases.
+checkLog.contains(secondary,
+ 'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
+
+// Test that replSetGetStatus returns the correct results while initial sync is in progress.
+var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+assert(res.initialSyncStatus,
+ () => "Response should have an 'initialSyncStatus' field: " + tojson(res));
+
+res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1, initialSync: 0}));
+assert(!res.initialSyncStatus,
+ () => "Response should not have an 'initialSyncStatus' field: " + tojson(res));
+
+assert.commandFailedWithCode(secondary.adminCommand({replSetGetStatus: 1, initialSync: "t"}),
+ ErrorCodes.TypeMismatch);
+
+assert.writeOK(coll.insert({a: 3}));
+assert.writeOK(coll.insert({a: 4}));
+
+// Let initial sync continue working.
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
+
+// Wait for initial sync to pause right before it finishes.
+checkLog.contains(secondary, 'initial sync - initialSyncHangBeforeFinish fail point enabled');
+
+// Test that replSetGetStatus returns the correct results when initial sync is at the very end.
+res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+assert(res.initialSyncStatus,
+ () => "Response should have an 'initialSyncStatus' field: " + tojson(res));
+assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+assert.eq(res.initialSyncStatus.appliedOps, 3);
+assert.eq(res.initialSyncStatus.failedInitialSyncAttempts, 0);
+assert.eq(res.initialSyncStatus.maxFailedInitialSyncAttempts, 10);
+assert.eq(res.initialSyncStatus.databases.databasesCloned, 3);
+assert.eq(res.initialSyncStatus.databases.test.collections, 1);
+assert.eq(res.initialSyncStatus.databases.test.clonedCollections, 1);
+assert.eq(res.initialSyncStatus.databases.test["test.foo"].documentsToCopy, 4);
+assert.eq(res.initialSyncStatus.databases.test["test.foo"].documentsCopied, 4);
+assert.eq(res.initialSyncStatus.databases.test["test.foo"].indexes, 1);
+assert.eq(res.initialSyncStatus.databases.test["test.foo"].fetchedBatches, 1);
+
+// Let initial sync finish and get into secondary state.
+assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'initialSyncHangBeforeFinish', mode: 'off'}));
+replSet.awaitSecondaryNodes(60 * 1000);
+
+// Test that replSetGetStatus returns the correct results after initial sync is finished.
+res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+assert(!res.initialSyncStatus,
+ () => "Response should not have an 'initialSyncStatus' field: " + tojson(res));
+
+assert.commandFailedWithCode(secondary.adminCommand({replSetGetStatus: 1, initialSync: "m"}),
+ ErrorCodes.TypeMismatch);
+assert.eq(0,
+ secondary.getDB('local')['temp_oplog_buffer'].find().itcount(),
+ "Oplog buffer was not dropped after initial sync");
+
+replSet.stopSet();
})();
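For context: the initialSyncStatus fields asserted on above can also be polled until cloning reaches a desired point. A minimal sketch, not part of this patch; 'conn' is an assumed connection to the syncing node:

    function awaitDatabasesCloned(conn, expectedCount) {
        assert.soon(function() {
            var res = assert.commandWorked(conn.adminCommand({replSetGetStatus: 1}));
            // initialSyncStatus is only present while initial sync is in progress.
            return res.initialSyncStatus &&
                res.initialSyncStatus.databases.databasesCloned >= expectedCount;
        }, "node never reported " + expectedCount + " cloned databases");
    }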
diff --git a/jstests/replsets/initial_sync_reset_oldest_timestamp_after_failed_attempt.js b/jstests/replsets/initial_sync_reset_oldest_timestamp_after_failed_attempt.js
index d589c6320bc..0ec56197438 100644
--- a/jstests/replsets/initial_sync_reset_oldest_timestamp_after_failed_attempt.js
+++ b/jstests/replsets/initial_sync_reset_oldest_timestamp_after_failed_attempt.js
@@ -7,94 +7,94 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- // Set the number of initial sync attempts to 2 so that the test fails on unplanned failures.
- const replTest =
- new ReplSetTest({nodes: 2, nodeOptions: {setParameter: "numInitialSyncAttempts=2"}});
- replTest.startSet();
+// Set the number of initial sync attempts to 2 so that the test fails on unplanned failures.
+const replTest =
+ new ReplSetTest({nodes: 2, nodeOptions: {setParameter: "numInitialSyncAttempts=2"}});
+replTest.startSet();
- // Increase the election timeout to 24 hours so that we do not accidentally trigger an election
- // while the secondary is restarting.
- replTest.initiateWithHighElectionTimeout();
+// Increase the election timeout to 24 hours so that we do not accidentally trigger an election
+// while the secondary is restarting.
+replTest.initiateWithHighElectionTimeout();
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const dbName = "test";
- const collName = "initial_sync_reset_oldest_timestamp_after_failed_attempt";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "initial_sync_reset_oldest_timestamp_after_failed_attempt";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testColl.insert({_id: 1}));
+assert.commandWorked(testColl.insert({_id: 1}));
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 2}));
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 2}));
- // This will be the begin fetching point for both initial sync attempts. After the first initial
- // sync attempt fails, if the oldest timestamp isn't reset before the next attempt, the update
- // to the transaction table for this prepare will fail a WiredTiger assertion that the commit
- // timestamp for a storage transaction cannot be older than the oldest timestamp.
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+// This will be the begin fetching point for both initial sync attempts. After the first initial
+// sync attempt fails, if the oldest timestamp isn't reset before the next attempt, the update
+// to the transaction table for this prepare will fail a WiredTiger assertion that the commit
+// timestamp for a storage transaction cannot be older than the oldest timestamp.
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- jsTestLog("Prepared a transaction at timestamp: " + prepareTimestamp);
+jsTestLog("Prepared a transaction at timestamp: " + prepareTimestamp);
- replTest.stop(secondary, undefined, {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- // Set the number of operations per batch to be 1 so that we can know exactly how
- // many batches there will be.
- "replBatchLimitOperations": 1,
- "failpoint.initialSyncHangAfterDataCloning": tojson({mode: "alwaysOn"}),
- // Allow the syncing node to write the prepare oplog entry and apply the first update
- // before failing initial sync.
- "failpoint.failInitialSyncBeforeApplyingBatch": tojson({mode: {skip: 2}}),
- }
- },
- true /* wait */);
+replTest.stop(secondary, undefined, {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ // Set the number of operations per batch to be 1 so that we can know exactly how
+ // many batches there will be.
+ "replBatchLimitOperations": 1,
+ "failpoint.initialSyncHangAfterDataCloning": tojson({mode: "alwaysOn"}),
+ // Allow the syncing node to write the prepare oplog entry and apply the first update
+ // before failing initial sync.
+ "failpoint.failInitialSyncBeforeApplyingBatch": tojson({mode: {skip: 2}}),
+ }
+ },
+ true /* wait */);
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangAfterDataCloning fail point enabled");
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangAfterDataCloning fail point enabled");
- jsTestLog("Running operations while collection cloning is paused");
+jsTestLog("Running operations while collection cloning is paused");
- // This command will be in the last batch applied before the first initial sync attempt fails.
- // If the oldest timestamp isn't reset on the next attempt, then the timestamp for this update
- // will be the oldest timestamp.
- assert.commandWorked(testColl.update({_id: 1}, {_id: 1, a: 1}));
+// This command will be in the last batch applied before the first initial sync attempt fails.
+// If the oldest timestamp isn't reset on the next attempt, then the timestamp for this update
+// will be the oldest timestamp.
+assert.commandWorked(testColl.update({_id: 1}, {_id: 1, a: 1}));
- // This entry will be applied in its own batch, so the failInitialSyncBeforeApplyingBatch
- // failpoint will cause the first initial sync attempt to fail before applying this.
- assert.commandWorked(testColl.update({_id: 1}, {_id: 1, b: 2}));
+// This entry will be applied in its own batch, so the failInitialSyncBeforeApplyingBatch
+// failpoint will cause the first initial sync attempt to fail before applying this.
+assert.commandWorked(testColl.update({_id: 1}, {_id: 1, b: 2}));
- jsTestLog("Resuming initial sync");
+jsTestLog("Resuming initial sync");
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangAfterDataCloning", mode: "off"}));
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "initialSyncHangAfterDataCloning", mode: "off"}));
- // Wait for this failpoint to be hit before turning it off and causing initial sync to fail.
- checkLog.contains(secondary, "failInitialSyncBeforeApplyingBatch fail point enabled");
+// Wait for this failpoint to be hit before turning it off and causing initial sync to fail.
+checkLog.contains(secondary, "failInitialSyncBeforeApplyingBatch fail point enabled");
- jsTestLog("Failing first initial sync attempt");
+jsTestLog("Failing first initial sync attempt");
- // Turn the failpoint off and cause initial sync to fail.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "failInitialSyncBeforeApplyingBatch", mode: "off"}));
+// Turn the failpoint off and cause initial sync to fail.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "failInitialSyncBeforeApplyingBatch", mode: "off"}));
- replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
+replTest.waitForState(secondary, ReplSetTest.State.SECONDARY);
- jsTestLog("Initial sync completed");
+jsTestLog("Initial sync completed");
- assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(session.abortTransaction_forTesting());
- replTest.stopSet();
+replTest.stopSet();
})();
\ No newline at end of file
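The restart above enables fail points through startup parameters rather than at runtime; both routes drive the same fail point machinery. A hedged sketch of the two forms, with 'node' as a placeholder connection:

    // At startup: prefix the fail point name with "failpoint." in setParameter.
    var startupOpts = {
        setParameter:
            {"failpoint.initialSyncHangAfterDataCloning": tojson({mode: "alwaysOn"})}
    };
    // At runtime: use the configureFailPoint admin command, as the test does to
    // release the hang.
    assert.commandWorked(node.adminCommand(
        {configureFailPoint: "initialSyncHangAfterDataCloning", mode: "off"}));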
diff --git a/jstests/replsets/initial_sync_test_fixture_test.js b/jstests/replsets/initial_sync_test_fixture_test.js
index 520ba43b2b4..c112bc58ef7 100644
--- a/jstests/replsets/initial_sync_test_fixture_test.js
+++ b/jstests/replsets/initial_sync_test_fixture_test.js
@@ -13,165 +13,164 @@
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
- load("jstests/replsets/libs/initial_sync_test.js");
-
- /**
- * Helper function to check that specific messages appeared or did not appear in the logs. If
- * the command was listIndexes and we expect the message to appear, we also add the collection
- * UUID to make sure that it corresponds to the expected collection.
- */
- function checkLogForCollectionClonerMsg(node, commandName, dbname, contains, collUUID) {
- let msg = "Collection Cloner scheduled a remote command on the " + dbname + " db: { " +
- commandName;
- if (commandName === "listIndexes" && contains) {
- msg += ": " + collUUID;
- }
-
- if (contains) {
- assert(checkLog.checkContainsOnce(node, msg));
- } else {
- assert(!checkLog.checkContainsOnce(node, msg));
- }
- }
+"use strict";
- /**
- * Helper function to check that the specific message appeared exactly once in the logs and that
- * there is no other message saying that the next batch is about to be applied. This will show
- * that oplog application is paused.
- */
- function checkLogForOplogApplicationMsg(node, size) {
- let msg = "Initial Syncer is about to apply the next oplog batch of size: ";
- checkLog.containsWithCount(node, msg, 1, 5 * 1000);
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/initial_sync_test.js");
- msg += size;
- assert(checkLog.checkContainsOnce(node, msg));
+/**
+ * Helper function to check that specific messages appeared or did not appear in the logs. If
+ * the command was listIndexes and we expect the message to appear, we also add the collection
+ * UUID to make sure that it corresponds to the expected collection.
+ */
+function checkLogForCollectionClonerMsg(node, commandName, dbname, contains, collUUID) {
+ let msg =
+ "Collection Cloner scheduled a remote command on the " + dbname + " db: { " + commandName;
+ if (commandName === "listIndexes" && contains) {
+ msg += ": " + collUUID;
}
- // Set up Initial Sync Test.
- const initialSyncTest = new InitialSyncTest();
- const primary = initialSyncTest.getPrimary();
- let secondary = initialSyncTest.getSecondary();
- const db = primary.getDB("test");
- const largeString = 'z'.repeat(10 * 1024 * 1024);
-
- assert.commandWorked(db.foo.insert({a: 1}));
- assert.commandWorked(db.bar.insert({b: 1}));
-
- // Prepare a transaction so that we know that step() can restart the secondary even if there is
- // a prepared transaction.
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase("test");
- const sessionColl = sessionDB.getCollection("foo");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({c: 1}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- // Do same listDatabases command as CollectionCloner.
- const databases =
- assert.commandWorked(primary.adminCommand({listDatabases: 1, nameOnly: true})).databases;
-
- // This step call restarts the secondary and causes it to go into initial sync.
- assert(!initialSyncTest.step());
+ if (contains) {
+ assert(checkLog.checkContainsOnce(node, msg));
+ } else {
+ assert(!checkLog.checkContainsOnce(node, msg));
+ }
+}
- secondary = initialSyncTest.getSecondary();
- secondary.setSlaveOk();
+/**
+ * Helper function to check that the specific message appeared exactly once in the logs and that
+ * there is no other message saying that the next batch is about to be applied. This will show
+ * that oplog application is paused.
+ */
+function checkLogForOplogApplicationMsg(node, size) {
+ let msg = "Initial Syncer is about to apply the next oplog batch of size: ";
+ checkLog.containsWithCount(node, msg, 1, 5 * 1000);
+
+ msg += size;
+ assert(checkLog.checkContainsOnce(node, msg));
+}
+
+// Set up Initial Sync Test.
+const initialSyncTest = new InitialSyncTest();
+const primary = initialSyncTest.getPrimary();
+let secondary = initialSyncTest.getSecondary();
+const db = primary.getDB("test");
+const largeString = 'z'.repeat(10 * 1024 * 1024);
+
+assert.commandWorked(db.foo.insert({a: 1}));
+assert.commandWorked(db.bar.insert({b: 1}));
+
+// Prepare a transaction so that we know that step() can restart the secondary even if there is
+// a prepared transaction.
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase("test");
+const sessionColl = sessionDB.getCollection("foo");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({c: 1}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+// Do same listDatabases command as CollectionCloner.
+const databases =
+ assert.commandWorked(primary.adminCommand({listDatabases: 1, nameOnly: true})).databases;
+
+// This step call restarts the secondary and causes it to go into initial sync.
+assert(!initialSyncTest.step());
+
+secondary = initialSyncTest.getSecondary();
+secondary.setSlaveOk();
+
+// Make sure that we cannot read from this node yet.
+assert.commandFailedWithCode(secondary.getDB("test").runCommand({count: "foo"}),
+ ErrorCodes.NotMasterOrSecondary);
+
+// Make sure that we saw the listDatabases call in the log messages, but didn't see any
+// listCollections or listIndexes call.
+checkLogForCollectionClonerMsg(secondary, "listDatabases", "admin", true);
+checkLogForCollectionClonerMsg(secondary, "listCollections", "admin", false);
+checkLogForCollectionClonerMsg(secondary, "listIndexes", "admin", false);
+
+// Iterate over the databases and collections in the same order that the test fixture would so
+// that we can check the log messages to make sure initial sync is paused as expected.
+for (let dbObj of databases) {
+ const dbname = dbObj.name;
+
+ // We skip the local db during the collection cloning phase of initial sync.
+ if (dbname === "local") {
+ continue;
+ }
- // Make sure that we cannot read from this node yet.
- assert.commandFailedWithCode(secondary.getDB("test").runCommand({count: "foo"}),
- ErrorCodes.NotMasterOrSecondary);
+ const database = primary.getDB(dbname);
- // Make sure that we saw the listDatabases call in the log messages, but didn't see any
- // listCollections or listIndexes call.
- checkLogForCollectionClonerMsg(secondary, "listDatabases", "admin", true);
- checkLogForCollectionClonerMsg(secondary, "listCollections", "admin", false);
- checkLogForCollectionClonerMsg(secondary, "listIndexes", "admin", false);
+ // Do same listCollections command as CollectionCloner.
+ const res = assert.commandWorked(database.runCommand(
+ {listCollections: 1, filter: {$or: [{type: "collection"}, {type: {$exists: false}}]}}));
- // Iterate over the databases and collections in the same order that the test fixture would so
- // that we can check the log messages to make sure initial sync is paused as expected.
- for (let dbObj of databases) {
- const dbname = dbObj.name;
+ // Make sure that there is only one batch.
+ assert.eq(NumberLong(0), res.cursor.id, res);
- // We skip the local db during the collection cloning phase of initial sync.
- if (dbname === "local") {
- continue;
- }
+ const collectionsCursor = res.cursor;
- const database = primary.getDB(dbname);
+ // For each database, CollectionCloner will first call listCollections.
+ assert(!initialSyncTest.step());
- // Do same listCollections command as CollectionCloner.
- const res = assert.commandWorked(database.runCommand(
- {listCollections: 1, filter: {$or: [{type: "collection"}, {type: {$exists: false}}]}}));
+ // Make sure that we cannot read from this node yet.
+ assert.commandFailedWithCode(secondary.getDB("test").runCommand({count: "foo"}),
+ ErrorCodes.NotMasterOrSecondary);
- // Make sure that there is only one batch.
- assert.eq(NumberLong(0), res.cursor.id, res);
+ // Make sure that we saw the listCollections call in the log messages, but didn't see a
+ // listIndexes call.
+ checkLogForCollectionClonerMsg(secondary, "listCollections", dbname, true);
+ checkLogForCollectionClonerMsg(secondary, "listIndexes", "admin", false);
- const collectionsCursor = res.cursor;
+ for (let collectionObj of collectionsCursor.firstBatch) {
+ assert(collectionObj.info, collectionObj);
+ const collUUID = collectionObj.info.uuid;
- // For each database, CollectionCloner will first call listCollections.
+ // For each collection, CollectionCloner will call listIndexes.
assert(!initialSyncTest.step());
// Make sure that we cannot read from this node yet.
assert.commandFailedWithCode(secondary.getDB("test").runCommand({count: "foo"}),
ErrorCodes.NotMasterOrSecondary);
- // Make sure that we saw the listCollections call in the log messages, but didn't see a
- // listIndexes call.
- checkLogForCollectionClonerMsg(secondary, "listCollections", dbname, true);
- checkLogForCollectionClonerMsg(secondary, "listIndexes", "admin", false);
-
- for (let collectionObj of collectionsCursor.firstBatch) {
- assert(collectionObj.info, collectionObj);
- const collUUID = collectionObj.info.uuid;
-
- // For each collection, CollectionCloner will call listIndexes.
- assert(!initialSyncTest.step());
+ // Make sure that we saw the listIndexes call in the log messages, but didn't
+ // see a listCollections call.
+ checkLogForCollectionClonerMsg(secondary, "listIndexes", dbname, true, collUUID);
+ checkLogForCollectionClonerMsg(secondary, "listCollections", "admin", false);
- // Make sure that we cannot read from this node yet.
- assert.commandFailedWithCode(secondary.getDB("test").runCommand({count: "foo"}),
- ErrorCodes.NotMasterOrSecondary);
-
- // Make sure that we saw the listIndexes call in the log messages, but didn't
- // see a listCollections call.
- checkLogForCollectionClonerMsg(secondary, "listIndexes", dbname, true, collUUID);
- checkLogForCollectionClonerMsg(secondary, "listCollections", "admin", false);
-
- // Perform large operations during collection cloning so that we will need multiple
- // batches during oplog application.
- assert.commandWorked(db.foo.insert({d: largeString}));
- assert.commandWorked(db.bar.insert({e: largeString}));
- }
+ // Perform large operations during collection cloning so that we will need multiple
+ // batches during oplog application.
+ assert.commandWorked(db.foo.insert({d: largeString}));
+ assert.commandWorked(db.bar.insert({e: largeString}));
}
+}
- // Check that we see the expected number of batches during oplog application.
-
- // This batch should correspond to the 'prepare' op.
- assert(!initialSyncTest.step());
- checkLogForOplogApplicationMsg(secondary, 1);
- assert(!initialSyncTest.step());
- checkLogForOplogApplicationMsg(secondary, 9);
- assert(!initialSyncTest.step());
- checkLogForOplogApplicationMsg(secondary, 1);
+// Check that we see the expected number of batches during oplog application.
- assert(initialSyncTest.step(), "Expected initial sync to have completed, but it did not");
+// This batch should correspond to the 'prepare' op.
+assert(!initialSyncTest.step());
+checkLogForOplogApplicationMsg(secondary, 1);
+assert(!initialSyncTest.step());
+checkLogForOplogApplicationMsg(secondary, 9);
+assert(!initialSyncTest.step());
+checkLogForOplogApplicationMsg(secondary, 1);
- // Abort transaction so that the data consistency checks in stop() can run.
- assert.commandWorked(session.abortTransaction_forTesting());
+assert(initialSyncTest.step(), "Expected initial sync to have completed, but it did not");
- // Issue a w:2 write to make sure the secondary has replicated the abortTransaction oplog entry.
- assert.commandWorked(primary.getDB("otherDB").otherColl.insert({x: 1}, {writeConcern: {w: 2}}));
+// Abort transaction so that the data consistency checks in stop() can run.
+assert.commandWorked(session.abortTransaction_forTesting());
- // Confirm that node can be read from and that it has the inserts that were made while the node
- // was in initial sync.
- assert.eq(secondary.getDB("test").foo.find().count(), 6);
- assert.eq(secondary.getDB("test").bar.find().count(), 6);
- assert.eq(secondary.getDB("test").foo.find().itcount(), 6);
- assert.eq(secondary.getDB("test").bar.find().itcount(), 6);
+// Issue a w:2 write to make sure the secondary has replicated the abortTransaction oplog entry.
+assert.commandWorked(primary.getDB("otherDB").otherColl.insert({x: 1}, {writeConcern: {w: 2}}));
- // Do data consistency checks at the end.
- initialSyncTest.stop();
+// Confirm that the node can be read from and that it has the inserts that were made while the node
+// was in initial sync.
+assert.eq(secondary.getDB("test").foo.find().count(), 6);
+assert.eq(secondary.getDB("test").bar.find().count(), 6);
+assert.eq(secondary.getDB("test").foo.find().itcount(), 6);
+assert.eq(secondary.getDB("test").bar.find().itcount(), 6);
+// Do data consistency checks at the end.
+initialSyncTest.stop();
})();
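Condensed, the fixture contract this test drives looks like the following sketch (based only on the calls above; see jstests/replsets/libs/initial_sync_test.js for the real interface):

    const fixture = new InitialSyncTest();
    // step() pauses initial sync at the next checkpoint and returns false until
    // the secondary has finished syncing.
    while (!fixture.step()) {
        // Paused mid-sync: safe to inspect logs or issue writes on the primary.
    }
    fixture.stop();  // Runs the data consistency checks, then tears down the set.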
diff --git a/jstests/replsets/initial_sync_update_missing_doc1.js b/jstests/replsets/initial_sync_update_missing_doc1.js
index 25cdc7ebe6b..418b566fcbf 100644
--- a/jstests/replsets/initial_sync_update_missing_doc1.js
+++ b/jstests/replsets/initial_sync_update_missing_doc1.js
@@ -12,41 +12,42 @@
*/
(function() {
- load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
- load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
+load("jstests/libs/check_log.js");
- const name = 'initial_sync_update_missing_doc1';
- const replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
+const name = 'initial_sync_update_missing_doc1';
+const replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+});
- replSet.startSet();
- replSet.initiate();
- const primary = replSet.getPrimary();
- const dbName = 'test';
+replSet.startSet();
+replSet.initiate();
+const primary = replSet.getPrimary();
+const dbName = 'test';
- var coll = primary.getDB(dbName).getCollection(name);
- assert.commandWorked(coll.insert({_id: 0, x: 1}));
+var coll = primary.getDB(dbName).getCollection(name);
+assert.commandWorked(coll.insert({_id: 0, x: 1}));
- // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
- // it is syncing from the primary.
- const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
- const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
+// Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
+// it is syncing from the primary.
+const secondaryConfig = {
+ rsConfig: {votes: 0, priority: 0}
+};
+const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
- // Update and remove document on primary.
- updateRemove(coll, {_id: 0});
+// Update and remove document on primary.
+updateRemove(coll, {_id: 0});
- turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
+turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
- var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
+var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
- turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 0 /* numInserted */);
+turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 0 /* numInserted */);
- finishAndValidate(replSet, name, firstOplogEnd, 0 /* numInserted */, 0 /* numDocuments */);
-
- replSet.stopSet();
+finishAndValidate(replSet, name, firstOplogEnd, 0 /* numInserted */, 0 /* numDocuments */);
+replSet.stopSet();
})();
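The helper names above leave the mechanism implicit; a plausible sketch of the update-then-remove idea, illustrative only and not the actual code in initial_sync_update_missing_doc.js:

    function updateRemoveSketch(coll, query) {
        // The update is fetched from the oplog, but by the time the syncing node
        // applies it the document is gone, exercising the missing-document path.
        assert.commandWorked(coll.update(query, {$set: {x: 2}}));
        assert.commandWorked(coll.remove(query));
    }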
diff --git a/jstests/replsets/initial_sync_update_missing_doc2.js b/jstests/replsets/initial_sync_update_missing_doc2.js
index 0d4878ea01c..bd3b2d8957a 100644
--- a/jstests/replsets/initial_sync_update_missing_doc2.js
+++ b/jstests/replsets/initial_sync_update_missing_doc2.js
@@ -13,49 +13,50 @@
*/
(function() {
- load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
- load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
+load("jstests/libs/check_log.js");
- var name = 'initial_sync_update_missing_doc2';
- var replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
+var name = 'initial_sync_update_missing_doc2';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+});
- replSet.startSet();
- replSet.initiate();
+replSet.startSet();
+replSet.initiate();
- const primary = replSet.getPrimary();
- const dbName = 'test';
+const primary = replSet.getPrimary();
+const dbName = 'test';
- var coll = primary.getDB(dbName).getCollection(name);
- assert.commandWorked(coll.insert({_id: 0, x: 1}));
+var coll = primary.getDB(dbName).getCollection(name);
+assert.commandWorked(coll.insert({_id: 0, x: 1}));
- // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
- // it is syncing from the primary.
- const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
- const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
+// Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
+// it is syncing from the primary.
+const secondaryConfig = {
+ rsConfig: {votes: 0, priority: 0}
+};
+const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
- // Update and remove document on primary.
- updateRemove(coll, {_id: 0});
+// Update and remove document on primary.
+updateRemove(coll, {_id: 0});
- turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
+turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
- // Re-insert deleted document on the sync source. The secondary should be able to fetch and
- // insert this document after failing to apply the udpate.
- assert.commandWorked(coll.insert({_id: 0, x: 3}));
+// Re-insert deleted document on the sync source. The secondary should be able to fetch and
+// insert this document after failing to apply the udpate.
+assert.commandWorked(coll.insert({_id: 0, x: 3}));
- var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
+var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
- // Temporarily increase log levels so that we can see the 'Inserted missing document' log line.
- secondary.getDB('test').setLogLevel(1, 'replication');
- turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1 /* numInserted */);
- secondary.getDB('test').setLogLevel(0, 'replication');
+// Temporarily increase log levels so that we can see the 'Inserted missing document' log line.
+secondary.getDB('test').setLogLevel(1, 'replication');
+turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1 /* numInserted */);
+secondary.getDB('test').setLogLevel(0, 'replication');
- finishAndValidate(replSet, name, firstOplogEnd, 1 /* numInserted */, 1 /* numDocuments */);
-
- replSet.stopSet();
+finishAndValidate(replSet, name, firstOplogEnd, 1 /* numInserted */, 1 /* numDocuments */);
+replSet.stopSet();
})();
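The log-level toggling above, isolated as a reusable pattern ('conn' is a placeholder connection):

    conn.getDB('test').setLogLevel(1, 'replication');  // verbose replication logging on
    // ... trigger the code path whose log line must be observed ...
    conn.getDB('test').setLogLevel(0, 'replication');  // restore the default level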
diff --git a/jstests/replsets/initial_sync_update_missing_doc3.js b/jstests/replsets/initial_sync_update_missing_doc3.js
index 976603f4279..dadc0f32d9b 100644
--- a/jstests/replsets/initial_sync_update_missing_doc3.js
+++ b/jstests/replsets/initial_sync_update_missing_doc3.js
@@ -14,59 +14,61 @@
*/
(function() {
- load("jstests/libs/check_log.js");
- load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
- load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
+load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
+load("jstests/replsets/libs/two_phase_drops.js"); // For TwoPhaseDropCollectionTest.
- var name = 'initial_sync_update_missing_doc3';
- var replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
+var name = 'initial_sync_update_missing_doc3';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+});
- replSet.startSet();
- replSet.initiate();
- const primary = replSet.getPrimary();
- const dbName = 'test';
+replSet.startSet();
+replSet.initiate();
+const primary = replSet.getPrimary();
+const dbName = 'test';
- // Check for 'system.drop' two phase drop support.
- if (!TwoPhaseDropCollectionTest.supportsDropPendingNamespaces(replSet)) {
- jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
- replSet.stopSet();
- return;
- }
+// Check for 'system.drop' two phase drop support.
+if (!TwoPhaseDropCollectionTest.supportsDropPendingNamespaces(replSet)) {
+ jsTestLog('Drop pending namespaces not supported by storage engine. Skipping test.');
+ replSet.stopSet();
+ return;
+}
- var coll = primary.getDB(dbName).getCollection(name);
- assert.commandWorked(coll.insert({_id: 0, x: 1}));
+var coll = primary.getDB(dbName).getCollection(name);
+assert.commandWorked(coll.insert({_id: 0, x: 1}));
- // Add a secondary node with priority: 0 so that we prevent elections while it is syncing
- // from the primary.
- // We cannot give the secondary votes: 0 because then it will not be able to acknowledge
- // majority writes. That means the sync source can immediately drop it's collection
- // because it alone determines the majority commit point.
- const secondaryConfig = {rsConfig: {priority: 0}};
- const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
+// Add a secondary node with priority: 0 so that we prevent elections while it is syncing
+// from the primary.
+// We cannot give the secondary votes: 0 because then it will not be able to acknowledge
+// majority writes. That means the sync source can immediately drop its collection
+// because it alone determines the majority commit point.
+const secondaryConfig = {
+ rsConfig: {priority: 0}
+};
+const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
- // Update and remove document on primary.
- updateRemove(coll, {_id: 0});
+// Update and remove document on primary.
+updateRemove(coll, {_id: 0});
- turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
+turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
- // Re-insert deleted document.
- assert.commandWorked(coll.insert({_id: 0, x: 3}));
- // Mark the collection as drop pending so it gets renamed, but retains the UUID.
- assert.commandWorked(primary.getDB('test').runCommand({"drop": name}));
+// Re-insert deleted document.
+assert.commandWorked(coll.insert({_id: 0, x: 3}));
+// Mark the collection as drop pending so it gets renamed, but retains the UUID.
+assert.commandWorked(primary.getDB('test').runCommand({"drop": name}));
- var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
+var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
- secondary.getDB('test').setLogLevel(1, 'replication');
- turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1);
- secondary.getDB('test').setLogLevel(0, 'replication');
+secondary.getDB('test').setLogLevel(1, 'replication');
+turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1);
+secondary.getDB('test').setLogLevel(0, 'replication');
- replSet.awaitReplication();
- replSet.awaitSecondaryNodes();
+replSet.awaitReplication();
+replSet.awaitSecondaryNodes();
- replSet.stopSet();
+replSet.stopSet();
})();
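The votes/priority trade-off described in the comment above, sketched as member configurations (hosts are placeholders):

    // {_id: 1, host: "h:27018", priority: 0}
    //     Ineligible for election but still voting, so the majority commit point
    //     waits for it and the drop-pending collection is retained.
    // {_id: 1, host: "h:27018", priority: 0, votes: 0}
    //     Non-voting: the primary alone advances the commit point and may drop
    //     the collection before the secondary finishes cloning it.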
diff --git a/jstests/replsets/initial_sync_update_missing_doc_with_prepare.js b/jstests/replsets/initial_sync_update_missing_doc_with_prepare.js
index f20cb9797d1..d4143b50148 100644
--- a/jstests/replsets/initial_sync_update_missing_doc_with_prepare.js
+++ b/jstests/replsets/initial_sync_update_missing_doc_with_prepare.js
@@ -14,82 +14,80 @@
*/
(function() {
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
- load("jstests/libs/check_log.js");
-
- function doTest(doTransactionWork, numDocuments) {
- const name = 'initial_sync_update_missing_doc_with_prepare';
- const replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
-
- replSet.startSet();
- replSet.initiate();
- const primary = replSet.getPrimary();
- const dbName = 'test';
-
- var coll = primary.getDB(dbName).getCollection(name);
- assert.commandWorked(coll.insert({_id: 0, x: 1}));
- assert.commandWorked(coll.insert({_id: 1, x: 1}));
-
- // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
- // it is syncing from the primary.
- const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
- const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
-
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(name);
-
- session.startTransaction();
- doTransactionWork(sessionColl, {_id: 0});
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- // This transaction is eventually aborted, so this document should exist on the secondary
- // after initial sync.
- session.startTransaction();
- doTransactionWork(sessionColl, {_id: 1});
- PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(session.abortTransaction_forTesting());
-
- turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
-
- var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
-
- turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 0 /* numInserted */);
-
- // Since we aborted the second transaction, we expect this collection to still exist after
- // initial sync.
- finishAndValidate(replSet, name, firstOplogEnd, 0 /* numInserted */, numDocuments);
-
- // Make sure the secondary has the correct documents after syncing from the primary. The
- // second document was deleted in the prepared transaction that was aborted. Therefore, it
- // should have been properly replication.
- coll = secondary.getDB(dbName).getCollection(name);
- assert.docEq(null, coll.findOne({_id: 0}), 'document on secondary matches primary');
- assert.docEq(
- {_id: 1, x: 1}, coll.findOne({_id: 1}), 'document on secondary matches primary');
-
- replSet.stopSet();
- }
-
- jsTestLog("Testing with prepared transaction");
- // Passing in a function to update and remove document on primary in a prepared transaction
- // between phrase 1 and 2. Once the secondary receives the commit for the transaction, the
- // secondary should apply each operation separately (one update, and one delete) during initial
- // sync.
- doTest(updateRemove, 1 /* numDocuments after initial sync */);
-
- jsTestLog("Testing with large prepared transaction");
- // Passing in a function to insert, update and remove large documents on primary in a large
- // prepared transaction. Once the secondary receives the commit for the transaction, the
- // secondary should apply each operation separately (one insert, one update, and one delete)
- // during initial sync.
- doTest(insertUpdateRemoveLarge, 2 /* numDocuments after initial sync */);
-
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
+load("jstests/libs/check_log.js");
+
+function doTest(doTransactionWork, numDocuments) {
+ const name = 'initial_sync_update_missing_doc_with_prepare';
+ const replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+ });
+
+ replSet.startSet();
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const dbName = 'test';
+
+ var coll = primary.getDB(dbName).getCollection(name);
+ assert.commandWorked(coll.insert({_id: 0, x: 1}));
+ assert.commandWorked(coll.insert({_id: 1, x: 1}));
+
+ // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
+ // it is syncing from the primary.
+ const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
+ const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
+
+ const session = primary.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB.getCollection(name);
+
+ session.startTransaction();
+ doTransactionWork(sessionColl, {_id: 0});
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+ // This transaction is eventually aborted, so this document should exist on the secondary
+ // after initial sync.
+ session.startTransaction();
+ doTransactionWork(sessionColl, {_id: 1});
+ PrepareHelpers.prepareTransaction(session);
+ assert.commandWorked(session.abortTransaction_forTesting());
+
+ turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
+
+ var res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+ assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+ var firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
+
+ turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 0 /* numInserted */);
+
+ // Since we aborted the second transaction, we expect this collection to still exist after
+ // initial sync.
+ finishAndValidate(replSet, name, firstOplogEnd, 0 /* numInserted */, numDocuments);
+
+ // Make sure the secondary has the correct documents after syncing from the primary. The
+ // second document was deleted in the prepared transaction that was aborted. Therefore, it
+ // should have been properly replicated.
+ coll = secondary.getDB(dbName).getCollection(name);
+ assert.docEq(null, coll.findOne({_id: 0}), 'document on secondary matches primary');
+ assert.docEq({_id: 1, x: 1}, coll.findOne({_id: 1}), 'document on secondary matches primary');
+
+ replSet.stopSet();
+}
+
+jsTestLog("Testing with prepared transaction");
+// Passing in a function to update and remove document on primary in a prepared transaction
+// between phases 1 and 2. Once the secondary receives the commit for the transaction, the
+// secondary should apply each operation separately (one update, and one delete) during initial
+// sync.
+doTest(updateRemove, 1 /* numDocuments after initial sync */);
+
+jsTestLog("Testing with large prepared transaction");
+// Passing in a function to insert, update and remove large documents on primary in a large
+// prepared transaction. Once the secondary receives the commit for the transaction, the
+// secondary should apply each operation separately (one insert, one update, and one delete)
+// during initial sync.
+doTest(insertUpdateRemoveLarge, 2 /* numDocuments after initial sync */);
})();
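For reference, the prepared-transaction lifecycle these tests exercise, condensed; PrepareHelpers comes from jstests/core/txns/libs/prepare_helpers.js, and 'session'/'sessionColl' are as in the test:

    session.startTransaction();
    assert.commandWorked(sessionColl.insert({_id: 2}));
    // prepareTransaction returns the prepare timestamp required to commit.
    const ts = PrepareHelpers.prepareTransaction(session);
    assert.commandWorked(PrepareHelpers.commitTransaction(session, ts));
    // The aborted variant calls session.abortTransaction_forTesting() instead.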
diff --git a/jstests/replsets/initial_sync_update_reinsert_missing_doc_with_prepare.js b/jstests/replsets/initial_sync_update_reinsert_missing_doc_with_prepare.js
index 9ea620377d5..369306f5a4a 100644
--- a/jstests/replsets/initial_sync_update_reinsert_missing_doc_with_prepare.js
+++ b/jstests/replsets/initial_sync_update_reinsert_missing_doc_with_prepare.js
@@ -14,74 +14,72 @@
*/
(function() {
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
- load("jstests/libs/check_log.js");
-
- function doTest(doTransactionWork, numDocuments) {
- const name = 'initial_sync_update_missing_doc_with_prepare';
- const replSet = new ReplSetTest({
- name: name,
- nodes: 1,
- });
-
- replSet.startSet();
- replSet.initiate();
- const primary = replSet.getPrimary();
- const dbName = 'test';
-
- const coll = primary.getDB(dbName).getCollection(name);
- assert.commandWorked(coll.insert({_id: 0, x: 1}));
-
- // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
- // it is syncing from the primary.
- const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
- const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
-
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(name);
-
- session.startTransaction();
- doTransactionWork(sessionColl, {_id: 0});
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
-
- // Re-insert deleted document on the sync source. The secondary should be able to fetch and
- // insert this document after failing to apply the udpate.
- assert.commandWorked(coll.insert({_id: 0, x: 3}));
-
- const res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
- assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
- const firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
-
- // Temporarily increase log levels so that we can see the 'Inserted missing document' log
- // line.
- secondary.getDB('test').setLogLevel(1, 'replication');
- turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1 /* numInserted */);
- secondary.getDB('test').setLogLevel(0, 'replication');
-
- finishAndValidate(replSet, name, firstOplogEnd, 1 /* numInserted */, numDocuments);
- assert.docEq(
- {_id: 0, x: 3}, coll.findOne({_id: 0}), 'document on secondary matches primary');
-
- replSet.stopSet();
- }
-
- jsTestLog("Testing with prepared transaction");
- // Passing in a function to update and remove document on primary in a prepared transaction
- // between phrase 1 and 2. Once the secondary receives the commit for the transaction, the
- // secondary should apply each operation separately (one update, and one delete) during initial
- // sync.
- doTest(updateRemove, 1 /* numDocuments after initial sync */);
-
- jsTestLog("Testing with large prepared transaction");
- // Passing in a function to insert, update and remove large documents on primary in a large
- // prepared transaction. Once the secondary receives the commit for the transaction, the
- // secondary should apply each operation separately (one insert, one update, and one delete)
- // during initial sync.
- doTest(insertUpdateRemoveLarge, 2 /* numDocuments after initial sync */);
-
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/initial_sync_update_missing_doc.js");
+load("jstests/libs/check_log.js");
+
+function doTest(doTransactionWork, numDocuments) {
+ const name = 'initial_sync_update_missing_doc_with_prepare';
+ const replSet = new ReplSetTest({
+ name: name,
+ nodes: 1,
+ });
+
+ replSet.startSet();
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const dbName = 'test';
+
+ const coll = primary.getDB(dbName).getCollection(name);
+ assert.commandWorked(coll.insert({_id: 0, x: 1}));
+
+ // Add a secondary node with priority: 0 and votes: 0 so that we prevent elections while
+ // it is syncing from the primary.
+ const secondaryConfig = {rsConfig: {votes: 0, priority: 0}};
+ const secondary = reInitiateSetWithSecondary(replSet, secondaryConfig);
+
+ const session = primary.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB.getCollection(name);
+
+ session.startTransaction();
+ doTransactionWork(sessionColl, {_id: 0});
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+ turnOffHangBeforeCopyingDatabasesFailPoint(secondary);
+
+ // Re-insert deleted document on the sync source. The secondary should be able to fetch and
+ // insert this document after failing to apply the update.
+ assert.commandWorked(coll.insert({_id: 0, x: 3}));
+
+ const res = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
+ assert.eq(res.initialSyncStatus.fetchedMissingDocs, 0);
+ const firstOplogEnd = res.initialSyncStatus.initialSyncOplogEnd;
+
+ // Temporarily increase log levels so that we can see the 'Inserted missing document' log
+ // line.
+ secondary.getDB('test').setLogLevel(1, 'replication');
+ turnOffHangBeforeGettingMissingDocFailPoint(primary, secondary, name, 1 /* numInserted */);
+ secondary.getDB('test').setLogLevel(0, 'replication');
+
+ finishAndValidate(replSet, name, firstOplogEnd, 1 /* numInserted */, numDocuments);
+ assert.docEq({_id: 0, x: 3}, coll.findOne({_id: 0}), 'document on secondary matches primary');
+
+ replSet.stopSet();
+}
+
+jsTestLog("Testing with prepared transaction");
+// Passing in a function to update and remove document on primary in a prepared transaction
+// between phases 1 and 2. Once the secondary receives the commit for the transaction, the
+// secondary should apply each operation separately (one update, and one delete) during initial
+// sync.
+doTest(updateRemove, 1 /* numDocuments after initial sync */);
+
+jsTestLog("Testing with large prepared transaction");
+// Passing in a function to insert, update and remove large documents on primary in a large
+// prepared transaction. Once the secondary receives the commit for the transaction, the
+// secondary should apply each operation separately (one insert, one update, and one delete)
+// during initial sync.
+doTest(insertUpdateRemoveLarge, 2 /* numDocuments after initial sync */);
})();
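A quick way to inspect the counters these assertions read, sketched with 'secondary' as in the test:

    const status = assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}));
    // fetchedMissingDocs counts documents re-fetched from the sync source after
    // an oplog update could not be applied locally during initial sync.
    printjson({
        fetched: status.initialSyncStatus.fetchedMissingDocs,
        oplogEnd: status.initialSyncStatus.initialSyncOplogEnd
    });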
diff --git a/jstests/replsets/initial_sync_uuid_not_found.js b/jstests/replsets/initial_sync_uuid_not_found.js
index 267f468b0c4..0942ac1f54b 100644
--- a/jstests/replsets/initial_sync_uuid_not_found.js
+++ b/jstests/replsets/initial_sync_uuid_not_found.js
@@ -5,72 +5,67 @@
* results in an empty result or zero count.
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/check_log.js');
+load('jstests/libs/check_log.js');
- const basename = 'initial_sync_rename_collection';
+const basename = 'initial_sync_rename_collection';
- jsTestLog('Bring up set');
- const rst = new ReplSetTest(
- {name: basename, nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+jsTestLog('Bring up set');
+const rst = new ReplSetTest(
+ {name: basename, nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB('d');
- const primaryColl = primaryDB.coll;
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB('d');
+const primaryColl = primaryDB.coll;
- jsTestLog('Create a collection (with a UUID) and insert a document.');
- assert.writeOK(primaryColl.insert({_id: 0}));
+jsTestLog('Create a collection (with a UUID) and insert a document.');
+assert.writeOK(primaryColl.insert({_id: 0}));
- const collInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0];
- assert(collInfo.info.uuid,
- 'newly created collection expected to have a UUID: ' + tojson(collInfo));
+const collInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0];
+assert(collInfo.info.uuid, 'newly created collection expected to have a UUID: ' + tojson(collInfo));
- jsTestLog('Make sure synced');
- rst.awaitReplication();
+jsTestLog('Make sure synced');
+rst.awaitReplication();
- jsTestLog('Resync the secondary enabling failpoint');
- function ResyncWithFailpoint(failpointName, failpointData) {
- let setParameter = {numInitialSyncAttempts: 1};
- setParameter['failpoint.' + failpointName] =
- tojson({mode: 'alwaysOn', data: failpointData});
- rst.restart(1, {startClean: true, setParameter});
- const secondary = rst.nodes[1];
- assert.eq(primary, rst.getPrimary(), 'Primary changed after reconfig');
+jsTestLog('Resync the secondary enabling failpoint');
+function ResyncWithFailpoint(failpointName, failpointData) {
+ let setParameter = {numInitialSyncAttempts: 1};
+ setParameter['failpoint.' + failpointName] = tojson({mode: 'alwaysOn', data: failpointData});
+ rst.restart(1, {startClean: true, setParameter});
+ const secondary = rst.nodes[1];
+ assert.eq(primary, rst.getPrimary(), 'Primary changed after reconfig');
- jsTestLog('Wait for new node to start cloning');
- secondary.setSlaveOk();
- const secondaryDB = secondary.getDB(primaryDB.getName());
- const secondaryColl = secondaryDB[primaryColl.getName()];
+ jsTestLog('Wait for new node to start cloning');
+ secondary.setSlaveOk();
+ const secondaryDB = secondary.getDB(primaryDB.getName());
+ const secondaryColl = secondaryDB[primaryColl.getName()];
- rst.reInitiate();
- checkLog.contains(secondary, 'initial sync - ' + failpointName + ' fail point enabled');
+ rst.reInitiate();
+ checkLog.contains(secondary, 'initial sync - ' + failpointName + ' fail point enabled');
- jsTestLog('Remove collection on the primary and insert a new document, recreating it.');
- assert(primaryColl.drop());
- assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 'majority'}}));
- const newCollInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0];
- assert(collInfo.info.uuid,
- 'recreated collection expected to have a UUID: ' + tojson(collInfo));
- assert.neq(collInfo.info.uuid,
- newCollInfo.info.uuid,
- 'recreated collection expected to have different UUID');
+ jsTestLog('Remove collection on the primary and insert a new document, recreating it.');
+ assert(primaryColl.drop());
+ assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 'majority'}}));
+ const newCollInfo = primaryDB.getCollectionInfos({name: primaryColl.getName()})[0];
+ assert(collInfo.info.uuid, 'recreated collection expected to have a UUID: ' + tojson(collInfo));
+ assert.neq(collInfo.info.uuid,
+ newCollInfo.info.uuid,
+ 'recreated collection expected to have different UUID');
- jsTestLog('Disable failpoint and resume initial sync');
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: failpointName, mode: 'off'}));
+ jsTestLog('Disable failpoint and resume initial sync');
+ assert.commandWorked(secondary.adminCommand({configureFailPoint: failpointName, mode: 'off'}));
- jsTestLog('Wait for both nodes to be up-to-date');
- rst.awaitSecondaryNodes();
- rst.awaitReplication();
+ jsTestLog('Wait for both nodes to be up-to-date');
+ rst.awaitSecondaryNodes();
+ rst.awaitReplication();
- jsTestLog('Check consistency and shut down replica-set');
- rst.checkReplicatedDataHashes();
- }
- ResyncWithFailpoint('initialSyncHangBeforeCollectionClone',
- {namespace: primaryColl.getFullName()});
- ResyncWithFailpoint('initialSyncHangAfterListCollections', {database: primaryDB.getName()});
- rst.stopSet();
+ jsTestLog('Check consistency and shut down replica-set');
+ rst.checkReplicatedDataHashes();
+}
+ResyncWithFailpoint('initialSyncHangBeforeCollectionClone', {namespace: primaryColl.getFullName()});
+ResyncWithFailpoint('initialSyncHangAfterListCollections', {database: primaryDB.getName()});
+rst.stopSet();
})();
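ResyncWithFailpoint assembles its restart parameters dynamically; written out literally for one of the two calls above (values taken from the test; the node's port is managed by ReplSetTest):

    rst.restart(1, {
        startClean: true,  // wipe data files so the node re-enters initial sync
        setParameter: {
            numInitialSyncAttempts: 1,
            "failpoint.initialSyncHangAfterListCollections":
                tojson({mode: "alwaysOn", data: {database: "d"}})
        }
    });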
diff --git a/jstests/replsets/initial_sync_views.js b/jstests/replsets/initial_sync_views.js
index bf60951837b..ae202aff0e7 100644
--- a/jstests/replsets/initial_sync_views.js
+++ b/jstests/replsets/initial_sync_views.js
@@ -3,39 +3,39 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js");
- let testName = "initial_sync_views";
- let hostName = getHostName();
+load("jstests/replsets/rslib.js");
+let testName = "initial_sync_views";
+let hostName = getHostName();
- let replTest = new ReplSetTest({name: testName, nodes: 1});
- replTest.startSet();
- replTest.initiate();
+let replTest = new ReplSetTest({name: testName, nodes: 1});
+replTest.startSet();
+replTest.initiate();
- let primaryDB = replTest.getPrimary().getDB(testName);
+let primaryDB = replTest.getPrimary().getDB(testName);
- for (let i = 0; i < 10; ++i) {
- assert.writeOK(primaryDB.coll.insert({a: i}));
- }
+for (let i = 0; i < 10; ++i) {
+ assert.writeOK(primaryDB.coll.insert({a: i}));
+}
- // Setup view.
- assert.commandWorked(
- primaryDB.runCommand({create: "view", viewOn: "coll", pipeline: [{$match: {a: 5}}]}));
+// Set up view.
+assert.commandWorked(
+ primaryDB.runCommand({create: "view", viewOn: "coll", pipeline: [{$match: {a: 5}}]}));
- assert.eq(10, primaryDB.coll.find().itcount());
- assert.eq(1, primaryDB.view.find().itcount());
+assert.eq(10, primaryDB.coll.find().itcount());
+assert.eq(1, primaryDB.view.find().itcount());
- // Add new member to the replica set and wait for initial sync to complete.
- let secondary = replTest.add();
- replTest.reInitiate();
- replTest.awaitReplication();
- replTest.awaitSecondaryNodes();
+// Add new member to the replica set and wait for initial sync to complete.
+let secondary = replTest.add();
+replTest.reInitiate();
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
- // Confirm secondary has expected collection and view document count.
- let secondaryDB = secondary.getDB(testName);
- assert.eq(10, secondaryDB.coll.find().itcount());
- assert.eq(1, secondaryDB.view.find().itcount());
+// Confirm secondary has expected collection and view document count.
+let secondaryDB = secondary.getDB(testName);
+assert.eq(10, secondaryDB.coll.find().itcount());
+assert.eq(1, secondaryDB.view.find().itcount());
- replTest.stopSet();
+replTest.stopSet();
})();
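The explicit create command above also has a shell shorthand that should issue the same server command, shown here for comparison:

    primaryDB.createView("view", "coll", [{$match: {a: 5}}]);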
diff --git a/jstests/replsets/initiate.js b/jstests/replsets/initiate.js
index 0afa0c85bcd..994cd3b73ea 100644
--- a/jstests/replsets/initiate.js
+++ b/jstests/replsets/initiate.js
@@ -3,22 +3,22 @@
* configs, so this is just seeing if it fails when it's supposed to.
*/
(function() {
- "use strict";
- var replTest = new ReplSetTest({name: 'testSet2', nodes: 1});
- var nodes = replTest.startSet();
+"use strict";
+var replTest = new ReplSetTest({name: 'testSet2', nodes: 1});
+var nodes = replTest.startSet();
- assert.soon(function() {
- try {
- var result = nodes[0].getDB("admin").runCommand(
- {replSetInitiate: {_id: "testSet2", members: [{_id: 0, tags: ["member0"]}]}});
- printjson(result);
- return (result.errmsg.match(/bad or missing host field/) ||
- result.errmsg.match(/Missing expected field \"host\"/));
- } catch (e) {
- print(e);
- }
- return false;
- });
+assert.soon(function() {
+ try {
+ var result = nodes[0].getDB("admin").runCommand(
+ {replSetInitiate: {_id: "testSet2", members: [{_id: 0, tags: ["member0"]}]}});
+ printjson(result);
+ return (result.errmsg.match(/bad or missing host field/) ||
+ result.errmsg.match(/Missing expected field \"host\"/));
+ } catch (e) {
+ print(e);
+ }
+ return false;
+});
- replTest.stopSet();
+replTest.stopSet();
}());
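The body above is the canonical assert.soon shape: a predicate retried until it returns true or a timeout fires, with exceptions caught so transient connection errors do not abort the poll. A sketch with the optional timeout and interval arguments made explicit (values illustrative):

    assert.soon(function() {
        return nodes[0].getDB("admin").runCommand({ping: 1}).ok === 1;
    }, "node never answered ping", 30 * 1000 /* timeout ms */, 1000 /* interval ms */);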
diff --git a/jstests/replsets/inmemory_preserves_active_txns.js b/jstests/replsets/inmemory_preserves_active_txns.js
index 2a5791b35ae..c05c24fb711 100644
--- a/jstests/replsets/inmemory_preserves_active_txns.js
+++ b/jstests/replsets/inmemory_preserves_active_txns.js
@@ -11,106 +11,104 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- // If the test runner passed --storageEngine=inMemory then we know inMemory is compiled into the
- // server. We'll actually use both inMemory and wiredTiger storage engines.
- const storageEngine = jsTest.options().storageEngine;
- if (storageEngine !== 'inMemory') {
- jsTestLog(`Skip test: storageEngine == "${storageEngine}", not "inMemory"`);
- return;
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+// If the test runner passed --storageEngine=inMemory then we know inMemory is compiled into the
+// server. We'll actually use both inMemory and wiredTiger storage engines.
+const storageEngine = jsTest.options().storageEngine;
+if (storageEngine !== 'inMemory') {
+ jsTestLog(`Skip test: storageEngine == "${storageEngine}", not "inMemory"`);
+ return;
+}
+
+// A new replica set for both the commit and abort tests to ensure the same clean state.
+function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [
+ {storageEngine: "wiredTiger"},
+ // inMemory node must not be a voter, otherwise lastCommitted never advances
+ {storageEngine: "inMemory", rsConfig: {priority: 0, votes: 0}},
+ ],
+ waitForKeys: false
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+
+ const primary = replSet.getPrimary();
+ const secondary = replSet.getSecondary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ const oldestRequiredTimestampForCrashRecovery =
+ PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
+ assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
+
+ jsTestLog("Get transaction entry from config.transactions");
+
+ const txnEntry = primary.getDB("config").transactions.findOne();
+ // The prepare oplog entry may or may not be the first oplog entry depending on packing.
+ assert.lte(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
+
+ assert.soonNoExcept(() => {
+ const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
+ assert(secondaryTxnEntry);
+ assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
+ return true;
+ });
+
+ jsTestLog("Find prepare oplog entry");
+
+ const oplogEntry = PrepareHelpers.findPrepareEntry(primaryOplog);
+ assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
+ // Must already be written on secondary, since the config.transactions entry is.
+ const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ jsTestLog(`Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
+
+ assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(primaryOplog));
+ assert.soon(() => {
+ return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
+ });
+ assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(secondaryOplog));
+
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ assert.commandWorked(session.abortTransaction_forTesting());
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
}
- // A new replica set for both the commit and abort tests to ensure the same clean state.
- function doTest(commitOrAbort) {
- const replSet = new ReplSetTest({
- // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
- nodeOptions:
- {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
- nodes: [
- {storageEngine: "wiredTiger"},
- // inMemory node must not be a voter, otherwise lastCommitted never advances
- {storageEngine: "inMemory", rsConfig: {priority: 0, votes: 0}},
- ],
- waitForKeys: false
- });
-
- replSet.startSet(PrepareHelpers.replSetStartSetOptions);
- replSet.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
-
- const primary = replSet.getPrimary();
- const secondary = replSet.getSecondary();
- const primaryOplog = primary.getDB("local").oplog.rs;
- assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
- const secondaryOplog = secondary.getDB("local").oplog.rs;
- assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-
- const coll = primary.getDB("test").test;
- assert.commandWorked(coll.insert({}));
-
- jsTestLog("Prepare a transaction");
-
- const session = primary.startSession();
- session.startTransaction();
- assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const oldestRequiredTimestampForCrashRecovery =
- PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
- assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
-
- jsTestLog("Get transaction entry from config.transactions");
-
- const txnEntry = primary.getDB("config").transactions.findOne();
- // The prepare oplog entry may or may not be the first oplog entry depending on packing.
- assert.lte(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
-
- assert.soonNoExcept(() => {
- const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
- assert(secondaryTxnEntry);
- assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
- return true;
- });
-
- jsTestLog("Find prepare oplog entry");
-
- const oplogEntry = PrepareHelpers.findPrepareEntry(primaryOplog);
- assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
- // Must already be written on secondary, since the config.transactions entry is.
- const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
- assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
-
- jsTestLog("Insert documents until oplog exceeds oplogSize");
-
- // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
- PrepareHelpers.growOplogPastMaxSize(replSet);
-
- jsTestLog(
- `Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
-
- assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(primaryOplog));
- assert.soon(() => {
- return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
- });
- assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(secondaryOplog));
-
- if (commitOrAbort === "commit") {
- jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- } else if (commitOrAbort === "abort") {
- jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
- assert.commandWorked(session.abortTransaction_forTesting());
- } else {
- throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
- }
-
- PrepareHelpers.awaitOplogTruncation(replSet);
-
- replSet.stopSet();
- }
+ PrepareHelpers.awaitOplogTruncation(replSet);
+
+ replSet.stopSet();
+}
- doTest("commit");
- doTest("abort");
+doTest("commit");
+doTest("abort");
})();
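doTest walks the full prepared-transaction lifecycle. Stripped to its skeleton (a sketch assuming prepare_helpers.js is loaded and `primary` is the current primary):

    const session = primary.startSession();
    session.startTransaction();
    assert.commandWorked(session.getDatabase("test").test.insert({x: 1}));
    const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
    // Either resolution unpins the oplog entries the prepared state was holding:
    PrepareHelpers.commitTransaction(session, prepareTimestamp);
    // ...or: assert.commandWorked(session.abortTransaction_forTesting());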
diff --git a/jstests/replsets/interrupted_batch_insert.js b/jstests/replsets/interrupted_batch_insert.js
index ea0371e1be2..698e157f064 100644
--- a/jstests/replsets/interrupted_batch_insert.js
+++ b/jstests/replsets/interrupted_batch_insert.js
@@ -11,115 +11,109 @@
// though there was a rollback, those inserts will violate the {ordered: true} option.
(function() {
- "use strict";
-
- load('jstests/libs/parallelTester.js');
- load("jstests/replsets/rslib.js");
-
- var name = "interrupted_batch_insert";
- var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
- var nodes = replTest.nodeList();
-
- var conns = replTest.startSet();
- replTest.initiate({
- _id: name,
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1]},
- {_id: 2, host: nodes[2], priority: 0}
- ]
+"use strict";
+
+load('jstests/libs/parallelTester.js');
+load("jstests/replsets/rslib.js");
+
+var name = "interrupted_batch_insert";
+var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+var nodes = replTest.nodeList();
+
+var conns = replTest.startSet();
+replTest.initiate({
+ _id: name,
+ members:
+ [{_id: 0, host: nodes[0]}, {_id: 1, host: nodes[1]}, {_id: 2, host: nodes[2], priority: 0}]
+});
+
+// The test starts with node 0 as the primary.
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+var primary = replTest.nodes[0];
+var collName = primary.getDB("db")[name].getFullName();
+
+var getParameterResult =
+ primary.getDB("admin").runCommand({getParameter: 1, internalInsertMaxBatchSize: 1});
+assert.commandWorked(getParameterResult);
+const batchSize = getParameterResult.internalInsertMaxBatchSize;
+
+// Prevent any writes to node 0 (the primary) from replicating to nodes 1 and 2.
+stopServerReplication(conns[1]);
+stopServerReplication(conns[2]);
+
+// Allow the primary to insert the first 5 batches of documents. After that, the fail point
+// activates, and the client thread hangs until the fail point gets turned off.
+assert.commandWorked(primary.getDB("db").adminCommand(
+ {configureFailPoint: "hangDuringBatchInsert", mode: {skip: 5}}));
+
+// In a background thread, issue an insert command to the primary that will insert 10 batches of
+// documents.
+var worker = new ScopedThread((host, collName, numToInsert) => {
+ // Insert elements [{idx: 0}, {idx: 1}, ..., {idx: numToInsert - 1}].
+ const docsToInsert = Array.from({length: numToInsert}, (_, i) => {
+ return {idx: i};
});
+ var coll = new Mongo(host).getCollection(collName);
+ assert.commandFailedWithCode(
+ coll.insert(docsToInsert, {writeConcern: {w: "majority", wtimeout: 5000}, ordered: true}),
+ ErrorCodes.InterruptedDueToReplStateChange);
+}, primary.host, collName, 10 * batchSize);
+worker.start();
- // The test starts with node 0 as the primary.
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replTest.nodes[0];
- var collName = primary.getDB("db")[name].getFullName();
-
- var getParameterResult =
- primary.getDB("admin").runCommand({getParameter: 1, internalInsertMaxBatchSize: 1});
- assert.commandWorked(getParameterResult);
- const batchSize = getParameterResult.internalInsertMaxBatchSize;
-
- // Prevent any writes to node 0 (the primary) from replicating to nodes 1 and 2.
- stopServerReplication(conns[1]);
- stopServerReplication(conns[2]);
-
- // Allow the primary to insert the first 5 batches of documents. After that, the fail point
- // activates, and the client thread hangs until the fail point gets turned off.
- assert.commandWorked(primary.getDB("db").adminCommand(
- {configureFailPoint: "hangDuringBatchInsert", mode: {skip: 5}}));
-
- // In a background thread, issue an insert command to the primary that will insert 10 batches of
- // documents.
- var worker = new ScopedThread((host, collName, numToInsert) => {
- // Insert elements [{idx: 0}, {idx: 1}, ..., {idx: numToInsert - 1}].
- const docsToInsert = Array.from({length: numToInsert}, (_, i) => {
- return {idx: i};
- });
- var coll = new Mongo(host).getCollection(collName);
- assert.commandFailedWithCode(
- coll.insert(docsToInsert,
- {writeConcern: {w: "majority", wtimeout: 5000}, ordered: true}),
- ErrorCodes.InterruptedDueToReplStateChange);
- }, primary.host, collName, 10 * batchSize);
- worker.start();
-
- // Wait long enough to guarantee that all 5 batches of inserts have executed and the primary is
- // hung on the "hangDuringBatchInsert" fail point.
- checkLog.contains(primary, "hangDuringBatchInsert fail point enabled");
-
- // Make sure the insert command is, in fact, running in the background.
- assert.eq(primary.getDB("db").currentOp({"command.insert": name, active: true}).inprog.length,
- 1);
-
- // Completely isolate the current primary (node 0), forcing it to step down.
- conns[0].disconnect(conns[1]);
- conns[0].disconnect(conns[2]);
-
- // Wait for node 1, the only other eligible node, to become the new primary.
- replTest.waitForState(replTest.nodes[1], ReplSetTest.State.PRIMARY);
- assert.eq(replTest.nodes[1], replTest.getPrimary());
-
- restartServerReplication(conns[2]);
-
- // Issue a write to the new primary.
- var collOnNewPrimary = replTest.nodes[1].getCollection(collName);
- assert.writeOK(collOnNewPrimary.insert({singleDoc: 1}, {writeConcern: {w: "majority"}}));
-
- // Isolate node 1, forcing it to step down as primary, and reconnect node 0, allowing it to step
- // up again.
- conns[1].disconnect(conns[2]);
- conns[0].reconnect(conns[2]);
-
- // Wait for node 0 to become primary again.
- replTest.waitForState(primary, ReplSetTest.State.PRIMARY);
- assert.eq(replTest.nodes[0], replTest.getPrimary());
-
- // Allow the batch insert to continue.
- assert.commandWorked(primary.getDB("db").adminCommand(
- {configureFailPoint: "hangDuringBatchInsert", mode: "off"}));
-
- // Wait until the insert command is done.
- assert.soon(
- () =>
- primary.getDB("db").currentOp({"command.insert": name, active: true}).inprog.length ===
- 0);
-
- worker.join();
-
- var docs = primary.getDB("db")[name].find({idx: {$exists: 1}}).sort({idx: 1}).toArray();
-
- // Any discontinuity in the "idx" values is an error. If an "idx" document failed to insert, all
- // the of "idx" documents after it should also have failed to insert, because the insert
- // specified {ordered: 1}. Note, if none of the inserts were successful, that's fine.
- docs.forEach((element, index) => {
- assert.eq(element.idx, index);
- });
+// Wait long enough to guarantee that all 5 batches of inserts have executed and the primary is
+// hung on the "hangDuringBatchInsert" fail point.
+checkLog.contains(primary, "hangDuringBatchInsert fail point enabled");
+
+// Make sure the insert command is, in fact, running in the background.
+assert.eq(primary.getDB("db").currentOp({"command.insert": name, active: true}).inprog.length, 1);
+
+// Completely isolate the current primary (node 0), forcing it to step down.
+conns[0].disconnect(conns[1]);
+conns[0].disconnect(conns[2]);
+
+// Wait for node 1, the only other eligible node, to become the new primary.
+replTest.waitForState(replTest.nodes[1], ReplSetTest.State.PRIMARY);
+assert.eq(replTest.nodes[1], replTest.getPrimary());
+
+restartServerReplication(conns[2]);
+
+// Issue a write to the new primary.
+var collOnNewPrimary = replTest.nodes[1].getCollection(collName);
+assert.writeOK(collOnNewPrimary.insert({singleDoc: 1}, {writeConcern: {w: "majority"}}));
+
+// Isolate node 1, forcing it to step down as primary, and reconnect node 0, allowing it to step
+// up again.
+conns[1].disconnect(conns[2]);
+conns[0].reconnect(conns[2]);
+
+// Wait for node 0 to become primary again.
+replTest.waitForState(primary, ReplSetTest.State.PRIMARY);
+assert.eq(replTest.nodes[0], replTest.getPrimary());
+
+// Allow the batch insert to continue.
+assert.commandWorked(
+ primary.getDB("db").adminCommand({configureFailPoint: "hangDuringBatchInsert", mode: "off"}));
+
+// Wait until the insert command is done.
+assert.soon(
+ () =>
+ primary.getDB("db").currentOp({"command.insert": name, active: true}).inprog.length === 0);
+
+worker.join();
+
+var docs = primary.getDB("db")[name].find({idx: {$exists: 1}}).sort({idx: 1}).toArray();
+
+// Any discontinuity in the "idx" values is an error. If an "idx" document failed to insert, all
+// of the "idx" documents after it should also have failed to insert, because the insert
+// specified {ordered: 1}. Note that if none of the inserts were successful, that's fine.
+docs.forEach((element, index) => {
+ assert.eq(element.idx, index);
+});
- // Reconnect the remaining disconnected nodes, so we can exit.
- conns[0].reconnect(conns[1]);
- conns[1].reconnect(conns[2]);
- restartServerReplication(conns[1]);
+// Reconnect the remaining disconnected nodes, so we can exit.
+conns[0].reconnect(conns[1]);
+conns[1].reconnect(conns[2]);
+restartServerReplication(conns[1]);
- replTest.stopSet(15);
+replTest.stopSet(15);
}());
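The worker above follows the ScopedThread idiom from parallelTester.js: the callback runs in a fresh shell, so it must open its own connection and can see only the arguments passed at construction time. A minimal sketch with an illustrative payload:

    load('jstests/libs/parallelTester.js');
    const thread = new ScopedThread((host, ns) => {
        const coll = new Mongo(host).getCollection(ns);
        assert.commandWorked(coll.insert({fromWorker: 1}));  // illustrative write
    }, primary.host, collName);
    thread.start();
    thread.join();  // always join before tearing the set down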
diff --git a/jstests/replsets/invalid_index_spec.js b/jstests/replsets/invalid_index_spec.js
index 2a5252cba94..2889ae08da7 100644
--- a/jstests/replsets/invalid_index_spec.js
+++ b/jstests/replsets/invalid_index_spec.js
@@ -4,60 +4,60 @@
*/
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js");
-
- const testName = "invalid_index_spec";
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- let primaryDB = replTest.getPrimary().getDB(testName);
- let secondary = replTest.getSecondary();
- let secondaryAdminDB = secondary.getDB("admin");
-
- // Set a fail point that allows for index creation with invalid spec fields.
- primaryDB.adminCommand(
- {configureFailPoint: "skipIndexCreateFieldNameValidation", mode: "alwaysOn"});
-
- clearRawMongoProgramOutput();
-
- // Create a V1 index with invalid spec field. Expected to replicate without error or server
- // abort.
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: "test", indexes: [{v: 1, name: "w_1", key: {w: 1}, invalidOption1: 1}]}));
-
- // Create a V2 index with invalid spec field. Expected to cause server abort on replication.
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: "test", indexes: [{v: 2, name: "x_1", key: {x: 1}, invalidOption2: 1}]}));
-
- assert.soon(function() {
- try {
- secondaryAdminDB.runCommand({ping: 1});
- } catch (e) {
- return true;
- }
- return false;
- }, "Node did not terminate due to invalid index spec", 60 * 1000);
-
- // fassert() calls std::abort(), which returns a different exit code for Windows vs. other
- // platforms.
- const exitCode = MongoRunner.EXIT_ABRUPT;
- replTest.stop(secondary, undefined, {allowedExitCode: exitCode});
-
- // During the transition from the old code path in IndexBuilder to IndexBuildsCoordinator, we
- // will accept the fatal assertion code from either component.
- const msgIndexBuilder = "Fatal Assertion 50769";
- const msgIndexBuildsCoordinator = "Fatal assertion 34437";
- const msgIndexErrorType = "InvalidIndexSpecificationOption";
- const msgIndexError = "The field 'invalidOption2'";
-
- assert((rawMongoProgramOutput().match(msgIndexBuilder) ||
- rawMongoProgramOutput().match(msgIndexBuildsCoordinator)) &&
- (rawMongoProgramOutput().match(msgIndexErrorType) &&
- rawMongoProgramOutput().match(msgIndexError)),
- "Replication should have aborted on invalid index specification");
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/replsets/rslib.js");
+
+const testName = "invalid_index_spec";
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+let primaryDB = replTest.getPrimary().getDB(testName);
+let secondary = replTest.getSecondary();
+let secondaryAdminDB = secondary.getDB("admin");
+
+// Set a fail point that allows for index creation with invalid spec fields.
+primaryDB.adminCommand(
+ {configureFailPoint: "skipIndexCreateFieldNameValidation", mode: "alwaysOn"});
+
+clearRawMongoProgramOutput();
+
+// Create a V1 index with invalid spec field. Expected to replicate without error or server
+// abort.
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: "test", indexes: [{v: 1, name: "w_1", key: {w: 1}, invalidOption1: 1}]}));
+
+// Create a V2 index with invalid spec field. Expected to cause server abort on replication.
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: "test", indexes: [{v: 2, name: "x_1", key: {x: 1}, invalidOption2: 1}]}));
+
+assert.soon(function() {
+ try {
+ secondaryAdminDB.runCommand({ping: 1});
+ } catch (e) {
+ return true;
+ }
+ return false;
+}, "Node did not terminate due to invalid index spec", 60 * 1000);
+
+// fassert() calls std::abort(), which returns a different exit code for Windows vs. other
+// platforms.
+const exitCode = MongoRunner.EXIT_ABRUPT;
+replTest.stop(secondary, undefined, {allowedExitCode: exitCode});
+
+// During the transition from the old code path in IndexBuilder to IndexBuildsCoordinator, we
+// will accept the fatal assertion code from either component.
+const msgIndexBuilder = "Fatal Assertion 50769";
+const msgIndexBuildsCoordinator = "Fatal assertion 34437";
+const msgIndexErrorType = "InvalidIndexSpecificationOption";
+const msgIndexError = "The field 'invalidOption2'";
+
+assert((rawMongoProgramOutput().match(msgIndexBuilder) ||
+ rawMongoProgramOutput().match(msgIndexBuildsCoordinator)) &&
+ (rawMongoProgramOutput().match(msgIndexErrorType) &&
+ rawMongoProgramOutput().match(msgIndexError)),
+ "Replication should have aborted on invalid index specification");
+
+replTest.stopSet();
})();
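The test encodes a common crash-detection pattern: poll until the node stops answering ping, stop it while allowing the fassert exit code, then grep the captured program output. The output check in isolation (message strings taken from the test above):

    const output = rawMongoProgramOutput();
    assert(output.match("Fatal Assertion 50769") ||
               output.match("Fatal assertion 34437"),
           "expected a fatal assertion from one of the index-build components");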
diff --git a/jstests/replsets/ismaster1.js b/jstests/replsets/ismaster1.js
index 2c9a67d4856..3cd6cb0a8dc 100644
--- a/jstests/replsets/ismaster1.js
+++ b/jstests/replsets/ismaster1.js
@@ -57,7 +57,7 @@ var checkMember = function(memberInfo) {
// make sure the result has proper values for fields with known values
var badValues = []; // each mistake will be saved as three entries (key, badvalue, goodvalue)
for (field in memberInfo.goodValues) {
- if (typeof(memberInfo.goodValues[field]) === "object") {
+ if (typeof (memberInfo.goodValues[field]) === "object") {
// assumes the nested obj is disk in tags; this is currently true, but may change
if (result[field].disk !== memberInfo.goodValues[field].disk) {
badValues.push("tags.disk");
@@ -92,7 +92,6 @@ config.members[3].arbiterOnly = true;
replTest.initiate(config);
var agreeOnPrimaryAndSetVersion = function(setVersion) {
-
print("Waiting for primary and replica set version " + setVersion);
var nodes = replTest.nodes;
@@ -228,8 +227,7 @@ checkMember({
ok: 1
},
wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"],
- unwantedFields:
- ["arbiterOnly", "passives", "passive", "slaveDelay", "hidden", "buildIndexes"]
+ unwantedFields: ["arbiterOnly", "passives", "passive", "slaveDelay", "hidden", "buildIndexes"]
});
checkMember({
diff --git a/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js b/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js
index 237fc1880fc..d5568cedf99 100644
--- a/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js
+++ b/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js
@@ -6,108 +6,107 @@
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
-
- // Start one of the nodes with priority: 0 to avoid elections.
- var rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- let primary = rst.getPrimary();
-
- const dbName = "test";
- const collName = "kill_reads_with_prepare_conflicts_during_step_down";
-
- const primaryDB = primary.getDB(dbName);
- // Used to make sure that the correct amount of operations were killed on this node
- // during stepdown.
- const primaryAdmin = primary.getDB("admin");
- const primaryColl = primaryDB[collName];
-
- let session = primary.startSession();
- const sessionID = session.getSessionId();
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- assert.commandWorked(primaryAdmin.adminCommand(
- {configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
-
- // Insert a document that we will later modify in a transaction.
- assert.commandWorked(primaryColl.insert({_id: 1}));
-
- jsTestLog("Start a transaction and prepare it");
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- TestData.dbName = dbName;
- TestData.collName = collName;
-
- const readBlockedOnPrepareConflictThread = startParallelShell(() => {
- const parallelTestDB = db.getSiblingDB(TestData.dbName);
- const parallelTestCollName = TestData.collName;
-
- // Advance the clusterTime with another insert.
- let res = assert.commandWorked(parallelTestDB.runCommand(
- {insert: parallelTestCollName, documents: [{advanceClusterTime: 1}]}));
- assert(res.hasOwnProperty("$clusterTime"), res);
- assert(res.$clusterTime.hasOwnProperty("clusterTime"), res);
- const clusterTime = res.$clusterTime.clusterTime;
- jsTestLog("Using afterClusterTime: " + clusterTime);
-
- // The following read should block on the prepared transaction since it will be
- // reading a conflicting document using an afterClusterTime later than the
- // prepareTimestamp.
- assert.commandFailedWithCode(parallelTestDB.runCommand({
- find: parallelTestCollName,
- filter: {_id: 1},
- readConcern: {afterClusterTime: clusterTime}
- }),
- ErrorCodes.InterruptedDueToReplStateChange);
- }, primary.port);
-
- jsTestLog("Waiting for failpoint");
- checkLog.contains(primary, "WTPrintPrepareConflictLog fail point enabled");
-
- // Once we have confirmed that the find command has hit a prepare conflict, we can perform
- // a step down.
- jsTestLog("Stepping down primary");
- assert.commandWorked(
- primaryAdmin.adminCommand({replSetStepDown: 60 * 10 /* 10 minutes */, force: true}));
-
- readBlockedOnPrepareConflictThread();
-
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // Validate that the read operation got killed during step down.
- let replMetrics =
- assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
- assert.eq(replMetrics.stepDown.userOperationsKilled, 1);
-
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- primary = rst.getPrimary();
-
- // Make sure we can successfully commit the prepared transaction.
- jsTestLog("Restoring shell session state");
- session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
- sessionDB = session.getDatabase(dbName);
- // The transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session.setTxnNumber_forTesting(0);
- const txnNumber = session.getTxnNumber_forTesting();
-
- jsTestLog("Committing transaction");
- // Commit the transaction.
- assert.commandWorked(sessionDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }));
-
- rst.stopSet();
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+
+// Start one of the nodes with priority: 0 to avoid elections.
+var rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+let primary = rst.getPrimary();
+
+const dbName = "test";
+const collName = "kill_reads_with_prepare_conflicts_during_step_down";
+
+const primaryDB = primary.getDB(dbName);
+// Used to make sure that the correct number of operations was killed on this node
+// during stepdown.
+const primaryAdmin = primary.getDB("admin");
+const primaryColl = primaryDB[collName];
+
+let session = primary.startSession();
+const sessionID = session.getSessionId();
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+assert.commandWorked(
+ primaryAdmin.adminCommand({configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
+
+// Insert a document that we will later modify in a transaction.
+assert.commandWorked(primaryColl.insert({_id: 1}));
+
+jsTestLog("Start a transaction and prepare it");
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+
+const readBlockedOnPrepareConflictThread = startParallelShell(() => {
+ const parallelTestDB = db.getSiblingDB(TestData.dbName);
+ const parallelTestCollName = TestData.collName;
+
+ // Advance the clusterTime with another insert.
+ let res = assert.commandWorked(parallelTestDB.runCommand(
+ {insert: parallelTestCollName, documents: [{advanceClusterTime: 1}]}));
+ assert(res.hasOwnProperty("$clusterTime"), res);
+ assert(res.$clusterTime.hasOwnProperty("clusterTime"), res);
+ const clusterTime = res.$clusterTime.clusterTime;
+ jsTestLog("Using afterClusterTime: " + clusterTime);
+
+ // The following read should block on the prepared transaction since it will be
+ // reading a conflicting document using an afterClusterTime later than the
+ // prepareTimestamp.
+ assert.commandFailedWithCode(parallelTestDB.runCommand({
+ find: parallelTestCollName,
+ filter: {_id: 1},
+ readConcern: {afterClusterTime: clusterTime}
+ }),
+ ErrorCodes.InterruptedDueToReplStateChange);
+}, primary.port);
+
+jsTestLog("Waiting for failpoint");
+checkLog.contains(primary, "WTPrintPrepareConflictLog fail point enabled");
+
+// Once we have confirmed that the find command has hit a prepare conflict, we can perform
+// a step down.
+jsTestLog("Stepping down primary");
+assert.commandWorked(
+ primaryAdmin.adminCommand({replSetStepDown: 60 * 10 /* 10 minutes */, force: true}));
+
+readBlockedOnPrepareConflictThread();
+
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+// Validate that the read operation got killed during step down.
+let replMetrics = assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
+assert.eq(replMetrics.stepDown.userOperationsKilled, 1);
+
+// Allow the primary to be re-elected, and wait for it.
+assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+primary = rst.getPrimary();
+
+// Make sure we can successfully commit the prepared transaction.
+jsTestLog("Restoring shell session state");
+session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
+sessionDB = session.getDatabase(dbName);
+// The transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session.setTxnNumber_forTesting(0);
+const txnNumber = session.getTxnNumber_forTesting();
+
+jsTestLog("Committing transaction");
+// Commit the transaction.
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}));
+
+rst.stopSet();
})();
\ No newline at end of file
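Since the original shell session object does not survive the stepdown, the test rebuilds one around the same logical session id and commits by hand. The essential shape, assuming `sessionID` and `prepareTimestamp` captured earlier in the test:

    const restored = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
    restored.setTxnNumber_forTesting(0);  // not restored by createSessionWithGivenId
    assert.commandWorked(restored.getDatabase("test").adminCommand({
        commitTransaction: 1,
        commitTimestamp: prepareTimestamp,
        txnNumber: NumberLong(restored.getTxnNumber_forTesting()),
        autocommit: false,
    }));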
diff --git a/jstests/replsets/kill_ttl_on_stepdown.js b/jstests/replsets/kill_ttl_on_stepdown.js
index 9dc8b619034..18738a3e7ff 100644
--- a/jstests/replsets/kill_ttl_on_stepdown.js
+++ b/jstests/replsets/kill_ttl_on_stepdown.js
@@ -5,66 +5,66 @@
* @tags: [requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- const dbName = "kill_ttl_on_stepdown";
+const dbName = "kill_ttl_on_stepdown";
- const rst = new ReplSetTest({
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {setParameter: "ttlMonitorSleepSecs=15"}
- });
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {setParameter: "ttlMonitorSleepSecs=15"}
+});
+rst.startSet();
+rst.initiate();
- let primary = rst.getPrimary();
- let db = primary.getDB(dbName);
+let primary = rst.getPrimary();
+let db = primary.getDB(dbName);
- // Create a TTL index.
- db.getCollection("test").createIndex({x: 1}, {expireAfterSeconds: 3600});
+// Create a TTL index.
+db.getCollection("test").createIndex({x: 1}, {expireAfterSeconds: 3600});
- function getNumTTLPasses() {
- let serverStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
- return serverStatus.metrics.ttl.passes;
- }
+function getNumTTLPasses() {
+ let serverStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+ return serverStatus.metrics.ttl.passes;
+}
- // Let the TTLMonitor do some passes.
- assert.soon(() => {
- return getNumTTLPasses() > 0;
- }, "TTLMonitor never did any passes.");
+// Let the TTLMonitor do some passes.
+assert.soon(() => {
+ return getNumTTLPasses() > 0;
+}, "TTLMonitor never did any passes.");
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "hangTTLMonitorWithLock", mode: "alwaysOn"}));
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "hangTTLMonitorWithLock", mode: "alwaysOn"}));
- checkLog.contains(rst.getPrimary(), "Hanging due to hangTTLMonitorWithLock fail point");
+checkLog.contains(rst.getPrimary(), "Hanging due to hangTTLMonitorWithLock fail point");
- // See how many passes the TTLMonitor has done, before we stepdown the primary, killing it.
- let ttlPassesBeforeStepdown = getNumTTLPasses();
+// See how many passes the TTLMonitor has done, before we stepdown the primary, killing it.
+let ttlPassesBeforeStepdown = getNumTTLPasses();
- // Force a stepdown of the primary.
- assert.commandWorked(primary.getDB("admin").runCommand(
- {replSetStepDown: 60 * 10 /* 10 minutes */, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
- assert.commandWorked(primary.adminCommand({replSetStepUp: 1}));
+// Force a stepdown of the primary.
+assert.commandWorked(
+ primary.getDB("admin").runCommand({replSetStepDown: 60 * 10 /* 10 minutes */, force: true}));
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+assert.commandWorked(primary.adminCommand({replSetStepUp: 1}));
- primary = rst.getPrimary();
+primary = rst.getPrimary();
- // Ensure the TTLMonitor was interrupted.
- checkLog.contains(primary, "TTLMonitor was interrupted");
+// Ensure the TTLMonitor was interrupted.
+checkLog.contains(primary, "TTLMonitor was interrupted");
- // Disable the failpoint on the node that stepped down.
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "hangTTLMonitorWithLock", mode: "off"}));
+// Disable the failpoint on the node that stepped down.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "hangTTLMonitorWithLock", mode: "off"}));
- // Wait until the number TTLMonitor passes increases, informing us that the TTLMonitor thread
- // was not killed entirely and will continue to run after stepdown finishes.
- assert.soon(() => {
- if (getNumTTLPasses() > ttlPassesBeforeStepdown) {
- return true;
- }
- }, "TTLMonitor was not running after stepdown");
+// Wait until the number of TTLMonitor passes increases, informing us that the TTLMonitor thread
+// was not killed entirely and will continue to run after stepdown finishes.
+assert.soon(() => {
+ if (getNumTTLPasses() > ttlPassesBeforeStepdown) {
+ return true;
+ }
+}, "TTLMonitor was not running after stepdown");
- rst.stopSet();
+rst.stopSet();
}());
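getNumTTLPasses above is an instance of the standard counter probe: serverStatus exposes cumulative counters under metrics, so progress is detected by sampling before and after. Sketched against any connection `conn`:

    const before =
        assert.commandWorked(conn.adminCommand({serverStatus: 1})).metrics.ttl.passes;
    // ... let the TTL monitor run ...
    assert.soon(() => {
        return assert.commandWorked(conn.adminCommand({serverStatus: 1})).metrics.ttl.passes >
            before;
    }, "TTL monitor made no further passes");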
diff --git a/jstests/replsets/kills_reads_with_prepare_conflicts_during_stepup.js b/jstests/replsets/kills_reads_with_prepare_conflicts_during_stepup.js
index 3ad0bd22248..49d5a1dabc6 100644
--- a/jstests/replsets/kills_reads_with_prepare_conflicts_during_stepup.js
+++ b/jstests/replsets/kills_reads_with_prepare_conflicts_during_stepup.js
@@ -6,123 +6,125 @@
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
-
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
-
- const config = rst.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election before
- // we make the secondary step up.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
-
- let primary = rst.getPrimary();
- let secondary = rst.getSecondary();
-
- const dbName = "test";
- const collName = "kill_reads_with_prepare_conflicts_during_step_up";
-
- const primaryDB = primary.getDB(dbName);
- const primaryColl = primaryDB[collName];
-
- let session = primary.startSession();
- const sessionID = session.getSessionId();
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
-
- // Insert a document that we will later modify in a transaction.
- assert.commandWorked(primaryColl.insert({_id: 1}));
-
- jsTestLog("Start a transaction and prepare it");
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- // Advance the clusterTime with another insert.
- const clusterTimeAfterPrepare =
- assert
- .commandWorked(primaryColl.runCommand(
- "insert", {documents: [{advanceClusterTime: 1}], writeConcern: {w: "majority"}}))
- .operationTime;
-
- // Ensure that the secondary replicates the prepare and the additional insert.
- rst.awaitReplication();
-
- // Make sure a secondary read using afterClusterTime times out when trying to
- // read a prepared document.
- const secondaryDB = secondary.getDB(dbName);
- assert.commandFailedWithCode(secondaryDB.runCommand({
- find: collName,
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+
+const config = rst.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election before
+// we make the secondary step up.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
+
+let primary = rst.getPrimary();
+let secondary = rst.getSecondary();
+
+const dbName = "test";
+const collName = "kill_reads_with_prepare_conflicts_during_step_up";
+
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB[collName];
+
+let session = primary.startSession();
+const sessionID = session.getSessionId();
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
+
+// Insert a document that we will later modify in a transaction.
+assert.commandWorked(primaryColl.insert({_id: 1}));
+
+jsTestLog("Start a transaction and prepare it");
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+// Advance the clusterTime with another insert.
+const clusterTimeAfterPrepare =
+ assert
+ .commandWorked(primaryColl.runCommand(
+ "insert", {documents: [{advanceClusterTime: 1}], writeConcern: {w: "majority"}}))
+ .operationTime;
+
+// Ensure that the secondary replicates the prepare and the additional insert.
+rst.awaitReplication();
+
+// Make sure a secondary read using afterClusterTime times out when trying to
+// read a prepared document.
+const secondaryDB = secondary.getDB(dbName);
+assert.commandFailedWithCode(secondaryDB.runCommand({
+ find: collName,
+ filter: {_id: 1},
+ readConcern: {afterClusterTime: clusterTimeAfterPrepare},
+ maxTimeMS: 2 * 1000 // 2 seconds
+}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Clear secondary log so that when we wait for the WTPrintPrepareConflictLog fail point, we
+// do not count the previous find.
+assert.commandWorked(secondaryDB.adminCommand({clearLog: "global"}));
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+TestData.clusterTime = clusterTimeAfterPrepare;
+
+const waitForSecondaryReadBlockedOnPrepareConflictThread = startParallelShell(() => {
+ // Allow for secondary reads.
+ db.getMongo().setSlaveOk();
+ const parallelTestDB = db.getSiblingDB(TestData.dbName);
+ const parallelTestCollName = TestData.collName;
+
+ // The following read should block on the prepared transaction since it will be
+ // reading a conflicting document using an afterClusterTime later than the
+ // prepareTimestamp.
+ assert.commandFailedWithCode(parallelTestDB.runCommand({
+ find: parallelTestCollName,
filter: {_id: 1},
- readConcern: {afterClusterTime: clusterTimeAfterPrepare},
- maxTimeMS: 2 * 1000 // 2 seconds
+ readConcern: {afterClusterTime: TestData.clusterTime}
}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Clear secondary log so that when we wait for the WTPrintPrepareConflictLog fail point, we
- // do not count the previous find.
- assert.commandWorked(secondaryDB.adminCommand({clearLog: "global"}));
-
- TestData.dbName = dbName;
- TestData.collName = collName;
- TestData.clusterTime = clusterTimeAfterPrepare;
-
- const waitForSecondaryReadBlockedOnPrepareConflictThread = startParallelShell(() => {
- // Allow for secondary reads.
- db.getMongo().setSlaveOk();
- const parallelTestDB = db.getSiblingDB(TestData.dbName);
- const parallelTestCollName = TestData.collName;
-
- // The following read should block on the prepared transaction since it will be
- // reading a conflicting document using an afterClusterTime later than the
- // prepareTimestamp.
- assert.commandFailedWithCode(parallelTestDB.runCommand({
- find: parallelTestCollName,
- filter: {_id: 1},
- readConcern: {afterClusterTime: TestData.clusterTime}
- }),
- ErrorCodes.InterruptedDueToReplStateChange);
- }, secondary.port);
-
- jsTestLog("Waiting for failpoint");
- checkLog.contains(secondary, "WTPrintPrepareConflictLog fail point enabled");
-
- // Once we've confirmed that the find command has hit a prepare conflict on the secondary, cause
- // that secondary to step up.
- jsTestLog("Stepping up secondary");
- rst.stepUp(secondary);
-
- waitForSecondaryReadBlockedOnPrepareConflictThread();
-
- rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- primary = rst.getPrimary();
-
- // Make sure we can successfully commit the prepared transaction.
- jsTestLog("Restoring shell session state");
- session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
- sessionDB = session.getDatabase(dbName);
- // The transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session.setTxnNumber_forTesting(0);
- const txnNumber = session.getTxnNumber_forTesting();
-
- jsTestLog("Committing transaction");
- // Commit the transaction.
- assert.commandWorked(sessionDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }));
-
- rst.stopSet();
+ ErrorCodes.InterruptedDueToReplStateChange);
+}, secondary.port);
+
+jsTestLog("Waiting for failpoint");
+checkLog.contains(secondary, "WTPrintPrepareConflictLog fail point enabled");
+
+// Once we've confirmed that the find command has hit a prepare conflict on the secondary, cause
+// that secondary to step up.
+jsTestLog("Stepping up secondary");
+rst.stepUp(secondary);
+
+waitForSecondaryReadBlockedOnPrepareConflictThread();
+
+rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+primary = rst.getPrimary();
+
+// Make sure we can successfully commit the prepared transaction.
+jsTestLog("Restoring shell session state");
+session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
+sessionDB = session.getDatabase(dbName);
+// The transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session.setTxnNumber_forTesting(0);
+const txnNumber = session.getTxnNumber_forTesting();
+
+jsTestLog("Committing transaction");
+// Commit the transaction.
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}));
+
+rst.stopSet();
})();
\ No newline at end of file
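The parallel shell shows the secondary-read recipe these prepare tests rely on: mark the connection slaveOk, then read with readConcern.afterClusterTime at or past the prepare timestamp so the find must wait on the prepared document. A sketch, assuming `clusterTime` was captured from a prior write's operationTime and that the collection is named "coll":

    db.getMongo().setSlaveOk();
    const res = db.getSiblingDB("test").runCommand({
        find: "coll",
        filter: {_id: 1},
        readConcern: {afterClusterTime: clusterTime},
        maxTimeMS: 2000  // bound the wait rather than blocking indefinitely
    });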
diff --git a/jstests/replsets/last_error_reported_after_stepdown.js b/jstests/replsets/last_error_reported_after_stepdown.js
index dbf2533fccc..bfd0e43bb8a 100644
--- a/jstests/replsets/last_error_reported_after_stepdown.js
+++ b/jstests/replsets/last_error_reported_after_stepdown.js
@@ -3,111 +3,110 @@
* stepdown.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const primaryAdmin = primary.getDB("admin");
- // We need a separate connection to avoid interference with the ReplSetTestMechanism.
- const primaryDataConn = new Mongo(primary.host);
- const primaryDb = primaryDataConn.getDB("test");
- const collname = "last_error_reported_after_stepdown";
- const coll = primaryDb[collname];
+const primary = rst.getPrimary();
+const primaryAdmin = primary.getDB("admin");
+// We need a separate connection to avoid interference with the ReplSetTestMechanism.
+const primaryDataConn = new Mongo(primary.host);
+const primaryDb = primaryDataConn.getDB("test");
+const collname = "last_error_reported_after_stepdown";
+const coll = primaryDb[collname];
- // Never retry on network error, because this test needs to detect the network error.
- TestData.skipRetryOnNetworkError = true;
+// Never retry on network error, because this test needs to detect the network error.
+TestData.skipRetryOnNetworkError = true;
- // This is specifically testing unacknowledged legacy writes.
- primaryDataConn.forceWriteMode('legacy');
+// This is specifically testing unacknowledged legacy writes.
+primaryDataConn.forceWriteMode('legacy');
- assert.commandWorked(
- coll.insert([{_id: 'deleteme'}, {_id: 'updateme', nullfield: null}, {_id: 'findme'}],
- {writeConcern: {w: 1}}));
- rst.awaitReplication();
-
- // Note that "operation" should always be on primaryDataConn, so the stepdown doesn't clear
- // the last error.
- function runStepDownTest({description, logMsg, operation, errorCode, nDocs}) {
- jsTestLog(`Trying ${description} on the primary, then stepping down`);
- // We need to make sure the command is complete before stepping down.
- assert.commandWorked(
- primaryAdmin.adminCommand({setParameter: 1, logComponentVerbosity: {command: 1}}));
- operation();
- // Wait for the operation to complete.
- checkLog.contains(primary, logMsg + ' appName: "MongoDB Shell"');
- assert.commandWorked(
- primaryAdmin.adminCommand({setParameter: 1, logComponentVerbosity: {command: 0}}));
- assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- var lastError = assert.commandWorked(primaryDb.runCommand({getLastError: 1}));
- if (typeof(errorCode) == "number")
- assert.eq(lastError.code,
- errorCode,
- "Expected error code " + errorCode + ", got lastError of " +
- JSON.stringify(lastError));
- else {
- assert(!lastError.err,
- "Expected no error, got lastError of " + JSON.stringify(lastError));
- }
- if (typeof(nDocs) == "number") {
- assert.eq(lastError.n, nDocs, "Wrong number of documents modified or updated");
- }
+assert.commandWorked(
+ coll.insert([{_id: 'deleteme'}, {_id: 'updateme', nullfield: null}, {_id: 'findme'}],
+ {writeConcern: {w: 1}}));
+rst.awaitReplication();
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
+// Note that "operation" should always be on primaryDataConn, so the stepdown doesn't clear
+// the last error.
+function runStepDownTest({description, logMsg, operation, errorCode, nDocs}) {
+ jsTestLog(`Trying ${description} on the primary, then stepping down`);
+ // We need to make sure the command is complete before stepping down.
+ assert.commandWorked(
+ primaryAdmin.adminCommand({setParameter: 1, logComponentVerbosity: {command: 1}}));
+ operation();
+ // Wait for the operation to complete.
+ checkLog.contains(primary, logMsg + ' appName: "MongoDB Shell"');
+ assert.commandWorked(
+ primaryAdmin.adminCommand({setParameter: 1, logComponentVerbosity: {command: 0}}));
+ assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+ var lastError = assert.commandWorked(primaryDb.runCommand({getLastError: 1}));
+ if (typeof (errorCode) == "number")
+ assert.eq(
+ lastError.code,
+ errorCode,
+ "Expected error code " + errorCode + ", got lastError of " + JSON.stringify(lastError));
+ else {
+ assert(!lastError.err, "Expected no error, got lastError of " + JSON.stringify(lastError));
+ }
+ if (typeof (nDocs) == "number") {
+ assert.eq(lastError.n, nDocs, "Wrong number of documents modified or updated");
}
- // Tests which should have no errors.
- // Clear log messages to avoid picking up the log of the insertion of the 'deleteme'
- // document.
- assert.commandWorked(primaryAdmin.adminCommand({clearLog: 'global'}));
- runStepDownTest({
- description: "insert",
- logMsg: "insert " + coll.getFullName(),
- operation: () => coll.insert({_id: 0})
- });
- runStepDownTest({
- description: "update",
- logMsg: "update ",
- operation: () => coll.update({_id: 'updateme'}, {'$inc': {x: 1}}),
- nDocs: 1
- });
- runStepDownTest({
- description: "remove",
- logMsg: "remove ",
- operation: () => coll.remove({_id: 'deleteme'}),
- nDocs: 1
- });
+ // Allow the primary to be re-elected, and wait for it.
+ assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+ rst.getPrimary();
+}
+
+// Tests which should have no errors.
+// Clear log messages to avoid picking up the log of the insertion of the 'deleteme'
+// document.
+assert.commandWorked(primaryAdmin.adminCommand({clearLog: 'global'}));
+runStepDownTest({
+ description: "insert",
+ logMsg: "insert " + coll.getFullName(),
+ operation: () => coll.insert({_id: 0})
+});
+runStepDownTest({
+ description: "update",
+ logMsg: "update ",
+ operation: () => coll.update({_id: 'updateme'}, {'$inc': {x: 1}}),
+ nDocs: 1
+});
+runStepDownTest({
+ description: "remove",
+ logMsg: "remove ",
+ operation: () => coll.remove({_id: 'deleteme'}),
+ nDocs: 1
+});
- // Tests which should have errors.
- // We repeat log messages from tests above, so clear the log first.
- assert.commandWorked(primaryAdmin.adminCommand({clearLog: 'global'}));
- runStepDownTest({
- description: "insert with error",
- logMsg: "insert " + coll.getFullName(),
- operation: () => coll.insert({_id: 0}),
- errorCode: ErrorCodes.DuplicateKey
- });
- runStepDownTest({
- description: "update with error",
- logMsg: "update ",
- operation: () => coll.update({_id: 'updateme'}, {'$inc': {nullfield: 1}}),
- errorCode: ErrorCodes.TypeMismatch,
- nDocs: 0
- });
- runStepDownTest({
- description: "remove with error",
- logMsg: "remove ",
- operation: () => coll.remove({'$nonsense': {x: 1}}),
- errorCode: ErrorCodes.BadValue,
- nDocs: 0
- });
+// Tests which should have errors.
+// We repeat log messages from tests above, so clear the log first.
+assert.commandWorked(primaryAdmin.adminCommand({clearLog: 'global'}));
+runStepDownTest({
+ description: "insert with error",
+ logMsg: "insert " + coll.getFullName(),
+ operation: () => coll.insert({_id: 0}),
+ errorCode: ErrorCodes.DuplicateKey
+});
+runStepDownTest({
+ description: "update with error",
+ logMsg: "update ",
+ operation: () => coll.update({_id: 'updateme'}, {'$inc': {nullfield: 1}}),
+ errorCode: ErrorCodes.TypeMismatch,
+ nDocs: 0
+});
+runStepDownTest({
+ description: "remove with error",
+ logMsg: "remove ",
+ operation: () => coll.remove({'$nonsense': {x: 1}}),
+ errorCode: ErrorCodes.BadValue,
+ nDocs: 0
+});
- rst.stopSet();
+rst.stopSet();
})();
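Everything above hinges on the legacy write path: unacknowledged writes report their outcome only through a follow-up getLastError on the same connection, which is why the test pins a dedicated connection and disables network-error retries. A sketch, assuming a direct connection `conn` separate from the test harness's own:

    conn.forceWriteMode('legacy');
    const testDb = conn.getDB("test");
    testDb.coll.insert({_id: 0});                      // fire-and-forget on this path
    const gle = testDb.runCommand({getLastError: 1});  // outcome of the last write
    jsTestLog("lastError: " + tojson(gle));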
diff --git a/jstests/replsets/last_op_visible.js b/jstests/replsets/last_op_visible.js
index 4b8b70a24b4..94a0b32cbf0 100644
--- a/jstests/replsets/last_op_visible.js
+++ b/jstests/replsets/last_op_visible.js
@@ -8,51 +8,50 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
+"use strict";
- var name = 'lastOpVisible';
- var replTest = new ReplSetTest(
- {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}, waitForKeys: true});
+var name = 'lastOpVisible';
+var replTest = new ReplSetTest(
+ {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}, waitForKeys: true});
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- replTest.stopSet();
- return;
- }
- replTest.initiate();
-
- var primary = replTest.getPrimary();
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ replTest.stopSet();
+ return;
+}
+replTest.initiate();
- // Do an insert without writeConcern.
- var res = primary.getDB(name).runCommandWithMetadata({insert: name, documents: [{x: 1}]},
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
- var last_op_visible = res.commandReply["$replData"].lastOpVisible;
+var primary = replTest.getPrimary();
- // A find should return the same lastVisibleOp.
- res = primary.getDB(name).runCommandWithMetadata({find: name, readConcern: {level: "local"}},
+// Do an insert without writeConcern.
+var res = primary.getDB(name).runCommandWithMetadata({insert: name, documents: [{x: 1}]},
{"$replData": 1});
- assert.commandWorked(res.commandReply);
- assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
-
- // A majority readConcern with afterOpTime: lastOpVisible should also return the same
- // lastVisibleOp.
- res = primary.getDB(name).runCommandWithMetadata(
- {find: name, readConcern: {level: "majority", afterOpTime: last_op_visible}},
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
- assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
-
- // Do an insert without writeConcern.
- res = primary.getDB(name).runCommandWithMetadata(
- {insert: name, documents: [{x: 1}], writeConcern: {w: "majority"}}, {"$replData": 1});
- assert.commandWorked(res.commandReply);
- last_op_visible = res.commandReply["$replData"].lastOpVisible;
-
- // A majority readConcern should return the same lastVisibleOp.
- res = primary.getDB(name).runCommandWithMetadata({find: name, readConcern: {level: "majority"}},
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
- assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
- replTest.stopSet();
+assert.commandWorked(res.commandReply);
+var last_op_visible = res.commandReply["$replData"].lastOpVisible;
+
+// A find should return the same lastOpVisible.
+res = primary.getDB(name).runCommandWithMetadata({find: name, readConcern: {level: "local"}},
+ {"$replData": 1});
+assert.commandWorked(res.commandReply);
+assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
+
+// A majority readConcern with afterOpTime: lastOpVisible should also return the same
+// lastOpVisible.
+res = primary.getDB(name).runCommandWithMetadata(
+ {find: name, readConcern: {level: "majority", afterOpTime: last_op_visible}}, {"$replData": 1});
+assert.commandWorked(res.commandReply);
+assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
+
+// Do an insert with majority writeConcern.
+res = primary.getDB(name).runCommandWithMetadata(
+ {insert: name, documents: [{x: 1}], writeConcern: {w: "majority"}}, {"$replData": 1});
+assert.commandWorked(res.commandReply);
+last_op_visible = res.commandReply["$replData"].lastOpVisible;
+
+// A majority readConcern should return the same lastOpVisible.
+res = primary.getDB(name).runCommandWithMetadata({find: name, readConcern: {level: "majority"}},
+ {"$replData": 1});
+assert.commandWorked(res.commandReply);
+assert.eq(last_op_visible, res.commandReply["$replData"].lastOpVisible);
+replTest.stopSet();
}());
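For context on the API this test leans on: runCommandWithMetadata sends a command together with a metadata document, and asking for "$replData" makes the server attach replication metadata, including lastOpVisible, to the reply. A hedged sketch, assuming `primary` is a connection to the current primary and `test.t` is any collection:

var reply = primary.getDB("test").runCommandWithMetadata(
    {insert: "t", documents: [{x: 1}]}, {"$replData": 1});
assert.commandWorked(reply.commandReply);
// lastOpVisible is the newest optime this reader is guaranteed to observe.
printjson(reply.commandReply["$replData"].lastOpVisible);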
diff --git a/jstests/replsets/last_vote.js b/jstests/replsets/last_vote.js
index 6ff7198b3d3..62901259364 100644
--- a/jstests/replsets/last_vote.js
+++ b/jstests/replsets/last_vote.js
@@ -11,216 +11,207 @@
// @tags: [requires_persistence]
(function() {
- "use strict";
- load("jstests/replsets/rslib.js"); // For getLatestOp()
-
- var name = "last_vote";
- var rst = new ReplSetTest({
- name: name,
- nodes: 2,
- });
- rst.startSet();
-
- // Lower the election timeout to make the test run faster since it waits for multiple elections.
- var conf = rst.getReplSetConfig();
- conf.settings = {
- electionTimeoutMillis: 6000,
- };
- rst.initiate(conf);
-
- const lastVoteNS = 'local.replset.election';
-
- function getLastVoteDoc(conn) {
- assert.eq(
- conn.getCollection(lastVoteNS).find().itcount(), 1, 'last vote should be singleton');
- return conn.getCollection(lastVoteNS).findOne();
+"use strict";
+load("jstests/replsets/rslib.js"); // For getLatestOp()
+
+var name = "last_vote";
+var rst = new ReplSetTest({
+ name: name,
+ nodes: 2,
+});
+rst.startSet();
+
+// Lower the election timeout to make the test run faster since it waits for multiple elections.
+var conf = rst.getReplSetConfig();
+conf.settings = {
+ electionTimeoutMillis: 6000,
+};
+rst.initiate(conf);
+
+const lastVoteNS = 'local.replset.election';
+
+function getLastVoteDoc(conn) {
+ assert.eq(conn.getCollection(lastVoteNS).find().itcount(), 1, 'last vote should be singleton');
+ return conn.getCollection(lastVoteNS).findOne();
+}
+
+function setLastVoteDoc(conn, term, candidate) {
+ var newLastVote = {term: term, candidateIndex: rst.getNodeId(candidate)};
+ return assert.writeOK(conn.getCollection(lastVoteNS).update({}, newLastVote));
+}
+
+function assertNodeHasLastVote(node, term, candidate) {
+ var lastVoteDoc = getLastVoteDoc(node);
+ assert.eq(lastVoteDoc.term, term, node.host + " had wrong last vote term.");
+ assert.eq(lastVoteDoc.candidateIndex,
+ rst.getNodeId(candidate),
+ node.host + " had wrong last vote candidate.");
+}
+
+function assertCurrentTerm(node, term) {
+ var stat = assert.commandWorked(node.adminCommand({replSetGetStatus: 1}));
+ assert.eq(stat.term, term, "Term changed when it should not have");
+}
+
+jsTestLog("Test that last vote is set on successive elections");
+
+// Run a few successive elections, alternating who becomes primary.
+var numElections = 3;
+for (var i = 0; i < numElections; i++) {
+ var primary = rst.getPrimary();
+ var secondary = rst.getSecondary();
+ var term = getLatestOp(primary).t;
+
+ // SERVER-20844 ReplSetTest starts up a single node replica set then reconfigures to the
+ // correct size, so secondaries didn't vote in the first election.
+ if (i > 0) {
+ jsTestLog("Last vote should have term: " + term + " and candidate: " + primary.host +
+ ", index: " + rst.getNodeId(primary));
+ rst.nodes.forEach(function(node) {
+ assertNodeHasLastVote(node, term, primary);
+ });
}
+ assert.commandWorked(primary.adminCommand({replSetStepDown: 60 * 10, force: true}));
- function setLastVoteDoc(conn, term, candidate) {
- var newLastVote = {term: term, candidateIndex: rst.getNodeId(candidate)};
- return assert.writeOK(conn.getCollection(lastVoteNS).update({}, newLastVote));
- }
+ // Make sure a new primary has been established.
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+ rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
- function assertNodeHasLastVote(node, term, candidate) {
- var lastVoteDoc = getLastVoteDoc(node);
- assert.eq(lastVoteDoc.term, term, node.host + " had wrong last vote term.");
- assert.eq(lastVoteDoc.candidateIndex,
- rst.getNodeId(candidate),
- node.host + " had wrong last vote candidate.");
- }
+ // Reset election timeout for the old primary.
+ assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+}
- function assertCurrentTerm(node, term) {
- var stat = assert.commandWorked(node.adminCommand({replSetGetStatus: 1}));
- assert.eq(stat.term, term, "Term changed when it should not have");
- }
+var term = getLatestOp(rst.getPrimary()).t + 100;
- jsTestLog("Test that last vote is set on successive elections");
-
- // Run a few successive elections, alternating who becomes primary.
- var numElections = 3;
- for (var i = 0; i < numElections; i++) {
- var primary = rst.getPrimary();
- var secondary = rst.getSecondary();
- var term = getLatestOp(primary).t;
-
- // SERVER-20844 ReplSetTest starts up a single node replica set then reconfigures to the
- // correct size, so secondaries didn't vote in the first election.
- if (i > 0) {
- jsTestLog("Last vote should have term: " + term + " and candidate: " + primary.host +
- ", index: " + rst.getNodeId(primary));
- rst.nodes.forEach(function(node) {
- assertNodeHasLastVote(node, term, primary);
- });
- }
- assert.commandWorked(primary.adminCommand({replSetStepDown: 60 * 10, force: true}));
-
- // Make sure a new primary has been established.
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
-
- // Reset election timeout for the old primary.
- assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
- }
+jsTestLog("Test that last vote is loaded on startup");
- var term = getLatestOp(rst.getPrimary()).t + 100;
-
- jsTestLog("Test that last vote is loaded on startup");
-
- // Ensure that all ops are replicated before stepping up node 1.
- rst.awaitReplication();
-
- // We cannot reconfig node 0 to have priority 0 if it is currently the primary,
- // so we make sure node 1 is primary.
- jsTestLog("Stepping up node 1");
- rst.stepUp(rst.nodes[1]);
-
- jsTestLog("Reconfiguring cluster to make node 0 unelectable so it stays SECONDARY on restart");
- conf = rst.getReplSetConfigFromNode();
- conf.version++;
- conf.members[0].priority = 0;
- reconfig(rst, conf);
- rst.awaitNodesAgreeOnConfigVersion();
-
- jsTestLog("Restarting node 0 as a standalone");
- var node0 = rst.restart(0, {noReplSet: true}); // Restart as a standalone node.
- jsTestLog("Stopping node 1");
- rst.stop(1); // Stop node 1 so that node 0 controls the term by itself.
- jsTestLog("Setting the lastVote on node 0 to term: " + term + " candidate: " +
- rst.nodes[0].host + ", index: 0");
- setLastVoteDoc(node0, term, rst.nodes[0]);
-
- jsTestLog("Restarting node 0 in replica set mode");
- node0 = rst.restart(0); // Restart in replSet mode again.
- rst.waitForState(node0, ReplSetTest.State.SECONDARY);
-
- assert.soonNoExcept(function() {
- assertCurrentTerm(node0, term);
- return true;
- });
-
- jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command, " +
- "expecting failure in old term");
- var response = assert.commandWorked(node0.adminCommand({
- replSetRequestVotes: 1,
- setName: name,
- dryRun: true,
- term: term - 1,
- candidateIndex: 1,
- configVersion: conf.version,
- lastCommittedOp: getLatestOp(node0)
- }));
- assert.eq(response.term,
- term,
- "replSetRequestVotes response had the wrong term: " + tojson(response));
- assert(!response.voteGranted,
- "node granted vote in term before last vote doc: " + tojson(response));
- assertNodeHasLastVote(node0, term, rst.nodes[0]);
- assertCurrentTerm(node0, term);
+// Ensure that all ops are replicated before stepping up node 1.
+rst.awaitReplication();
- jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command in same term, " +
- "expecting success but no recording of lastVote");
- response = assert.commandWorked(node0.adminCommand({
- replSetRequestVotes: 1,
- setName: name,
- dryRun: true,
- term: term,
- candidateIndex: 1,
- configVersion: conf.version,
- lastCommittedOp: getLatestOp(node0)
- }));
- assert.eq(response.term,
- term,
- "replSetRequestVotes response had the wrong term: " + tojson(response));
- assert(response.voteGranted,
- "node failed to grant dryRun vote in term equal to last vote doc: " + tojson(response));
- assert.eq(response.reason,
- "",
- "replSetRequestVotes response had the wrong reason: " + tojson(response));
- assertNodeHasLastVote(node0, term, rst.nodes[0]);
- assertCurrentTerm(node0, term);
+// We cannot reconfig node 0 to have priority 0 if it is currently the primary,
+// so we make sure node 1 is primary.
+jsTestLog("Stepping up node 1");
+rst.stepUp(rst.nodes[1]);
- jsTestLog(
- "Manually sending node 0 a replSetRequestVotes command, expecting failure in same term");
- response = assert.commandWorked(node0.adminCommand({
- replSetRequestVotes: 1,
- setName: name,
- dryRun: false,
- term: term,
- candidateIndex: 1,
- configVersion: conf.version,
- lastCommittedOp: getLatestOp(node0)
- }));
- assert.eq(response.term,
- term,
- "replSetRequestVotes response had the wrong term: " + tojson(response));
- assert(!response.voteGranted,
- "node granted vote in term of last vote doc: " + tojson(response));
- assertNodeHasLastVote(node0, term, rst.nodes[0]);
- assertCurrentTerm(node0, term);
+jsTestLog("Reconfiguring cluster to make node 0 unelectable so it stays SECONDARY on restart");
+conf = rst.getReplSetConfigFromNode();
+conf.version++;
+conf.members[0].priority = 0;
+reconfig(rst, conf);
+rst.awaitNodesAgreeOnConfigVersion();
+
+jsTestLog("Restarting node 0 as a standalone");
+var node0 = rst.restart(0, {noReplSet: true}); // Restart as a standalone node.
+jsTestLog("Stopping node 1");
+rst.stop(1); // Stop node 1 so that node 0 controls the term by itself.
+jsTestLog("Setting the lastVote on node 0 to term: " + term + " candidate: " + rst.nodes[0].host +
+ ", index: 0");
+setLastVoteDoc(node0, term, rst.nodes[0]);
- jsTestLog("Manually sending node 0 a replSetRequestVotes command, " +
- "expecting success with a recording of the new lastVote");
- response = assert.commandWorked(node0.adminCommand({
- replSetRequestVotes: 1,
- setName: name,
- dryRun: false,
- term: term + 1,
- candidateIndex: 1,
- configVersion: conf.version,
- lastCommittedOp: getLatestOp(node0)
- }));
- assert.eq(response.term,
- term + 1,
- "replSetRequestVotes response had the wrong term: " + tojson(response));
- assert(response.voteGranted,
- "node failed to grant vote in term greater than last vote doc: " + tojson(response));
- assert.eq(response.reason,
- "",
- "replSetRequestVotes response had the wrong reason: " + tojson(response));
- assertNodeHasLastVote(node0, term + 1, rst.nodes[1]);
- assertCurrentTerm(node0, term + 1);
-
- jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command in future term, " +
- "expecting success but no recording of lastVote");
- response = assert.commandWorked(node0.adminCommand({
- replSetRequestVotes: 1,
- setName: name,
- dryRun: true,
- term: term + 2,
- candidateIndex: 1,
- configVersion: conf.version,
- lastCommittedOp: getLatestOp(node0)
- }));
- assert.eq(response.term,
- term + 2,
- "replSetRequestVotes response had the wrong term: " + tojson(response));
- assert(response.voteGranted,
- "node failed to grant vote in term greater than last vote doc: " + tojson(response));
- assert.eq(response.reason,
- "",
- "replSetRequestVotes response had the wrong reason: " + tojson(response));
- assertNodeHasLastVote(node0, term + 1, rst.nodes[1]);
- assertCurrentTerm(node0, term + 2);
-
- rst.stopSet();
+jsTestLog("Restarting node 0 in replica set mode");
+node0 = rst.restart(0); // Restart in replSet mode again.
+rst.waitForState(node0, ReplSetTest.State.SECONDARY);
+
+assert.soonNoExcept(function() {
+ assertCurrentTerm(node0, term);
+ return true;
+});
+
+jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command, " +
+ "expecting failure in old term");
+var response = assert.commandWorked(node0.adminCommand({
+ replSetRequestVotes: 1,
+ setName: name,
+ dryRun: true,
+ term: term - 1,
+ candidateIndex: 1,
+ configVersion: conf.version,
+ lastCommittedOp: getLatestOp(node0)
+}));
+assert.eq(
+ response.term, term, "replSetRequestVotes response had the wrong term: " + tojson(response));
+assert(!response.voteGranted,
+ "node granted vote in term before last vote doc: " + tojson(response));
+assertNodeHasLastVote(node0, term, rst.nodes[0]);
+assertCurrentTerm(node0, term);
+
+jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command in same term, " +
+ "expecting success but no recording of lastVote");
+response = assert.commandWorked(node0.adminCommand({
+ replSetRequestVotes: 1,
+ setName: name,
+ dryRun: true,
+ term: term,
+ candidateIndex: 1,
+ configVersion: conf.version,
+ lastCommittedOp: getLatestOp(node0)
+}));
+assert.eq(
+ response.term, term, "replSetRequestVotes response had the wrong term: " + tojson(response));
+assert(response.voteGranted,
+ "node failed to grant dryRun vote in term equal to last vote doc: " + tojson(response));
+assert.eq(
+ response.reason, "", "replSetRequestVotes response had the wrong reason: " + tojson(response));
+assertNodeHasLastVote(node0, term, rst.nodes[0]);
+assertCurrentTerm(node0, term);
+
+jsTestLog("Manually sending node 0 a replSetRequestVotes command, expecting failure in same term");
+response = assert.commandWorked(node0.adminCommand({
+ replSetRequestVotes: 1,
+ setName: name,
+ dryRun: false,
+ term: term,
+ candidateIndex: 1,
+ configVersion: conf.version,
+ lastCommittedOp: getLatestOp(node0)
+}));
+assert.eq(
+ response.term, term, "replSetRequestVotes response had the wrong term: " + tojson(response));
+assert(!response.voteGranted, "node granted vote in term of last vote doc: " + tojson(response));
+assertNodeHasLastVote(node0, term, rst.nodes[0]);
+assertCurrentTerm(node0, term);
+
+jsTestLog("Manually sending node 0 a replSetRequestVotes command, " +
+ "expecting success with a recording of the new lastVote");
+response = assert.commandWorked(node0.adminCommand({
+ replSetRequestVotes: 1,
+ setName: name,
+ dryRun: false,
+ term: term + 1,
+ candidateIndex: 1,
+ configVersion: conf.version,
+ lastCommittedOp: getLatestOp(node0)
+}));
+assert.eq(response.term,
+ term + 1,
+ "replSetRequestVotes response had the wrong term: " + tojson(response));
+assert(response.voteGranted,
+ "node failed to grant vote in term greater than last vote doc: " + tojson(response));
+assert.eq(
+ response.reason, "", "replSetRequestVotes response had the wrong reason: " + tojson(response));
+assertNodeHasLastVote(node0, term + 1, rst.nodes[1]);
+assertCurrentTerm(node0, term + 1);
+
+jsTestLog("Manually sending node 0 a dryRun replSetRequestVotes command in future term, " +
+ "expecting success but no recording of lastVote");
+response = assert.commandWorked(node0.adminCommand({
+ replSetRequestVotes: 1,
+ setName: name,
+ dryRun: true,
+ term: term + 2,
+ candidateIndex: 1,
+ configVersion: conf.version,
+ lastCommittedOp: getLatestOp(node0)
+}));
+assert.eq(response.term,
+ term + 2,
+ "replSetRequestVotes response had the wrong term: " + tojson(response));
+assert(response.voteGranted,
+ "node failed to grant vote in term greater than last vote doc: " + tojson(response));
+assert.eq(
+ response.reason, "", "replSetRequestVotes response had the wrong reason: " + tojson(response));
+assertNodeHasLastVote(node0, term + 1, rst.nodes[1]);
+assertCurrentTerm(node0, term + 2);
+
+rst.stopSet();
})();
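The vote requests above all share one shape, which is easier to see outside the diff. A sketch of a dry-run request, where `node`, `setName`, `term`, and `conf` stand in for values the test computes, and getLatestOp comes from jstests/replsets/rslib.js:

var response = assert.commandWorked(node.adminCommand({
    replSetRequestVotes: 1,
    setName: setName,
    dryRun: true,  // a dry run must never persist a new lastVote document
    term: term,
    candidateIndex: 1,
    configVersion: conf.version,
    lastCommittedOp: getLatestOp(node)
}));
// response.term and response.voteGranted tell the caller whether a real
// election in this term could succeed.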
diff --git a/jstests/replsets/lastop.js b/jstests/replsets/lastop.js
index cc368c1b95c..c1fa2ffb21f 100644
--- a/jstests/replsets/lastop.js
+++ b/jstests/replsets/lastop.js
@@ -2,126 +2,125 @@
// errors based on the preexisting data (e.g. duplicate key errors, but not parse errors).
// lastOp is used as the optime to wait for when write concern waits for replication.
(function() {
- var replTest = new ReplSetTest({name: 'testSet', nodes: 1});
- replTest.startSet();
- replTest.initiate();
+var replTest = new ReplSetTest({name: 'testSet', nodes: 1});
+replTest.startSet();
+replTest.initiate();
- var primary = replTest.getPrimary();
+var primary = replTest.getPrimary();
- // Two connections
- var m1 = new Mongo(primary.host);
- var m2 = new Mongo(primary.host);
+// Two connections
+var m1 = new Mongo(primary.host);
+var m2 = new Mongo(primary.host);
- // Do a write with m1, then a write with m2, then a no-op write with m1. m1 should have a lastOp
- // of m2's write.
+// Do a write with m1, then a write with m2, then a no-op write with m1. m1 should have a lastOp
+// of m2's write.
- assert.writeOK(m1.getCollection("test.foo").insert({m1: 1}));
- var firstOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m1.getCollection("test.foo").insert({m1: 1}));
+var firstOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 99}));
- var secondOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 99}));
+var secondOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // No-op update
- assert.writeOK(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}}));
- var noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// No-op update
+assert.writeOK(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}}));
+var noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, secondOp);
+assert.eq(noOp, secondOp);
- assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
- var thirdOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
+var thirdOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 98}));
- var fourthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 98}));
+var fourthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // No-op delete
- assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// No-op delete
+assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, fourthOp);
+assert.eq(noOp, fourthOp);
- // Dummy write, for a new lastOp.
- assert.writeOK(m1.getCollection("test.foo").insert({m1: 99}));
- var fifthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// Dummy write, for a new lastOp.
+assert.writeOK(m1.getCollection("test.foo").insert({m1: 99}));
+var fifthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 97}));
- var sixthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 97}));
+var sixthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // No-op find-and-modify delete
- m1.getCollection("test.foo").findAndModify({query: {m1: 1}, remove: 'true'});
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// No-op find-and-modify delete
+m1.getCollection("test.foo").findAndModify({query: {m1: 1}, remove: 'true'});
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, sixthOp);
+assert.eq(noOp, sixthOp);
- assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
- var seventhOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
+var seventhOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 96}));
- var eighthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 96}));
+var eighthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // No-op create index.
- assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// No-op create index.
+assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, eighthOp);
+assert.eq(noOp, eighthOp);
- assert.writeOK(m1.getCollection("test.foo").insert({_id: 1, x: 1}));
- var ninthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m1.getCollection("test.foo").insert({_id: 1, x: 1}));
+var ninthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
- var tenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
+var tenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // update with immutable field error
- assert.writeError(m1.getCollection("test.foo").update({_id: 1, x: 1}, {$set: {_id: 2}}));
- // "After applying the update to the document {_id: 1.0 , ...}, the (immutable) field '_id'
- // was found to have been altered to _id: 2.0"
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// update with immutable field error
+assert.writeError(m1.getCollection("test.foo").update({_id: 1, x: 1}, {$set: {_id: 2}}));
+// "After applying the update to the document {_id: 1.0 , ...}, the (immutable) field '_id'
+// was found to have been altered to _id: 2.0"
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, tenthOp);
+assert.eq(noOp, tenthOp);
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 992}));
- var eleventhOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 992}));
+var eleventhOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // find-and-modify immutable field error
- try {
- m1.getCollection("test.foo")
- .findAndModify({query: {_id: 1, x: 1}, update: {$set: {_id: 2}}});
- // The findAndModify shell helper should throw.
- assert(false);
- } catch (e) {
- assert.eq(e.code, 66);
- }
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// find-and-modify immutable field error
+try {
+ m1.getCollection("test.foo").findAndModify({query: {_id: 1, x: 1}, update: {$set: {_id: 2}}});
+ // The findAndModify shell helper should throw.
+ assert(false);
+} catch (e) {
+ assert.eq(e.code, 66);
+}
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, eleventhOp);
+assert.eq(noOp, eleventhOp);
- var bigString = new Array(3000).toString();
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 994, m3: bigString}));
+var bigString = new Array(3000).toString();
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 994, m3: bigString}));
- // Insert a document for the later duplicate-key (no-op) insert.
- assert.writeOK(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
- var thirteenthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// Insert a document for the later duplicate-key (no-op) insert.
+assert.writeOK(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
+var thirteenthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
- var fourteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
+var fourteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- // Hits DuplicateKey error and fails insert -- no-op
- assert.writeError(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// Hits DuplicateKey error and fails insert -- no-op
+assert.writeError(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, fourteenthOp);
+assert.eq(noOp, fourteenthOp);
- // Test update and delete failures in legacy write mode.
- m2.forceWriteMode('legacy');
- m1.forceWriteMode('legacy');
- m2.getCollection("test.foo").insert({m2: 995});
- var fifthteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+// Test update and delete failures in legacy write mode.
+m2.forceWriteMode('legacy');
+m1.forceWriteMode('legacy');
+m2.getCollection("test.foo").insert({m2: 995});
+var fifthteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- m1.getCollection("test.foo").remove({m1: 1});
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, fifthteenthOp);
+m1.getCollection("test.foo").remove({m1: 1});
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.eq(noOp, fifthteenthOp);
- m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 4}});
- noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.eq(noOp, fifthteenthOp);
- replTest.stopSet();
+m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 4}});
+noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
+assert.eq(noOp, fifthteenthOp);
+replTest.stopSet();
})();
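The invariant this file checks is easy to state in isolation: a write that turns out to be a no-op still advances the connection's lastOp to the newest optime in the oplog, so waiting for write concern on it covers all earlier writes. A sketch under that reading, assuming `primary` is the primary of a running set:

var m1 = new Mongo(primary.host);
var m2 = new Mongo(primary.host);
assert.writeOK(m1.getCollection("test.foo").insert({a: 1}));
assert.writeOK(m2.getCollection("test.foo").insert({b: 1}));
var latest = m2.getDB("test").getLastErrorObj().lastOp;
// m1's update matches a document but changes nothing: a no-op.
assert.writeOK(m1.getCollection("test.foo").update({a: 1}, {$set: {a: 1}}));
assert.eq(latest, m1.getDB("test").getLastErrorObj().lastOp);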
diff --git a/jstests/replsets/libs/election_handoff.js b/jstests/replsets/libs/election_handoff.js
index 2edbf122af6..f05e6b702d7 100644
--- a/jstests/replsets/libs/election_handoff.js
+++ b/jstests/replsets/libs/election_handoff.js
@@ -5,7 +5,6 @@
*/
var ElectionHandoffTest = (function() {
-
load("jstests/libs/check_log.js");
load("jstests/replsets/rslib.js");
@@ -76,10 +75,10 @@ var ElectionHandoffTest = (function() {
// If there are only two nodes in the set, verify that the old primary voted "yes".
if (numNodes === 2) {
checkLog.contains(expectedCandidate,
- `skipping dry run and running for election in term ${term+1}`);
+ `skipping dry run and running for election in term ${term + 1}`);
checkLog.contains(
expectedCandidate,
- `VoteRequester(term ${term+1}) received a yes vote from ${primary.host}`);
+ `VoteRequester(term ${term + 1}) received a yes vote from ${primary.host}`);
}
rst.awaitNodesAgreeOnPrimary();
@@ -87,5 +86,4 @@ var ElectionHandoffTest = (function() {
}
return {testElectionHandoff: testElectionHandoff, stepDownPeriodSecs: kStepDownPeriodSecs};
-
})();
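The only textual change in this hunk is the spacing inside the `${term + 1}` interpolations; checkLog.contains itself polls a node's in-memory log (via getLog) until the given substring appears or a timeout expires. A sketch with `node` and `term` as placeholders:

load("jstests/libs/check_log.js");
checkLog.contains(node, `skipping dry run and running for election in term ${term + 1}`);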
diff --git a/jstests/replsets/libs/initial_sync_test.js b/jstests/replsets/libs/initial_sync_test.js
index 0c19bfef057..7ec45729173 100644
--- a/jstests/replsets/libs/initial_sync_test.js
+++ b/jstests/replsets/libs/initial_sync_test.js
@@ -173,7 +173,6 @@ function InitialSyncTest(name = "InitialSyncTest", replSet, timeout) {
return true;
}
return hasCompletedInitialSync();
-
}, "initial sync did not pause or complete");
}
diff --git a/jstests/replsets/libs/initial_sync_update_missing_doc.js b/jstests/replsets/libs/initial_sync_update_missing_doc.js
index a277b357d82..9883bcc437d 100644
--- a/jstests/replsets/libs/initial_sync_update_missing_doc.js
+++ b/jstests/replsets/libs/initial_sync_update_missing_doc.js
@@ -14,7 +14,6 @@
// must be called after reInitiateSetWithSecondary, followed by
// turnOffHangBeforeGettingMissingDocFailPoint.
var reInitiateSetWithSecondary = function(replSet, secondaryConfig) {
-
const secondary = replSet.add(secondaryConfig);
secondary.setSlaveOk();
@@ -37,14 +36,12 @@ var reInitiateSetWithSecondary = function(replSet, secondaryConfig) {
'initial sync - initialSyncHangBeforeCopyingDatabases fail point enabled');
return secondary;
-
};
// Must be called after reInitiateSetWithSecondary. Turns off the
// initialSyncHangBeforeCopyingDatabases fail point so that the secondary will start copying all
// non-local databases.
var turnOffHangBeforeCopyingDatabasesFailPoint = function(secondary) {
-
assert.commandWorked(secondary.getDB('admin').runCommand(
{configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));
@@ -60,7 +57,6 @@ var turnOffHangBeforeCopyingDatabasesFailPoint = function(secondary) {
// initialSyncHangBeforeGettingMissingDocument fail point so that the secondary can check if the
// sync source has the missing document.
var turnOffHangBeforeGettingMissingDocFailPoint = function(primary, secondary, name, numInserted) {
-
if (numInserted === 0) {
// If we did not re-insert the missing document, insert an arbitrary document to move
// forward minValid even though the document was not found.
@@ -82,11 +78,9 @@ var turnOffHangBeforeGettingMissingDocFailPoint = function(primary, secondary, n
secondary, 'Missing document not found on source; presumably deleted later in oplog.');
}
checkLog.contains(secondary, 'initial sync done');
-
};
var finishAndValidate = function(replSet, name, firstOplogEnd, numInserted, numDocuments) {
-
replSet.awaitReplication();
replSet.awaitSecondaryNodes();
const dbName = 'test';
@@ -117,7 +111,6 @@ var finishAndValidate = function(replSet, name, firstOplogEnd, numInserted, numD
assert.eq(0,
secondary.getDB('local')['temp_oplog_buffer'].find().itcount(),
"Oplog buffer was not dropped after initial sync");
-
};
var updateRemove = function(sessionColl, query) {
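The helpers in this file are built around one failpoint idiom: configureFailPoint with mode "alwaysOn" parks the server at a named point, and mode "off" releases it. A sketch of the toggle, with `secondary` as a placeholder connection:

assert.commandWorked(secondary.getDB('admin').runCommand(
    {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'alwaysOn'}));
// ... act while the secondary is held at the failpoint ...
assert.commandWorked(secondary.getDB('admin').runCommand(
    {configureFailPoint: 'initialSyncHangBeforeCopyingDatabases', mode: 'off'}));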
diff --git a/jstests/replsets/libs/rename_across_dbs.js b/jstests/replsets/libs/rename_across_dbs.js
index 8ab9e148ecb..fe42cab63b6 100644
--- a/jstests/replsets/libs/rename_across_dbs.js
+++ b/jstests/replsets/libs/rename_across_dbs.js
@@ -68,18 +68,18 @@ var RenameAcrossDatabasesTest = function(options) {
protocolVersion: 1,
members: [
{
- _id: 0,
- host: hosts[0],
+ _id: 0,
+ host: hosts[0],
},
{
- _id: 1,
- host: hosts[1],
- priority: 0,
+ _id: 1,
+ host: hosts[1],
+ priority: 0,
},
{
- _id: 2,
- host: hosts[2],
- arbiterOnly: true,
+ _id: 2,
+ host: hosts[2],
+ arbiterOnly: true,
},
],
version: nextVersion,
@@ -160,5 +160,4 @@ var RenameAcrossDatabasesTest = function(options) {
_testLog('Test completed. Stopping replica set.');
replTest.stopSet();
};
-
};
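The member array being re-indented above encodes a standard three-role topology. A sketch of the same shape with placeholder hosts:

var config = {
    _id: 'rename_across_dbs',
    protocolVersion: 1,
    version: 1,
    members: [
        {_id: 0, host: 'host0:27017'},                     // electable data node
        {_id: 1, host: 'host1:27017', priority: 0},        // data node, never primary
        {_id: 2, host: 'host2:27017', arbiterOnly: true},  // votes, holds no data
    ],
};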
diff --git a/jstests/replsets/libs/rollback_test.js b/jstests/replsets/libs/rollback_test.js
index d027154a71f..933a0e5acc3 100644
--- a/jstests/replsets/libs/rollback_test.js
+++ b/jstests/replsets/libs/rollback_test.js
@@ -222,7 +222,6 @@ function RollbackTest(name = "RollbackTest", replSet) {
`RBID is too large. current RBID: ${rbid}, last RBID: ${lastRBID}`);
return rbid === lastRBID + 1;
-
}, "Timed out waiting for RBID to increment on " + curSecondary.host);
} else {
log(`Skipping RBID check on ${curSecondary.host} because shutdowns ` +
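The check being reformatted in this hunk relies on the rollback id counter: replSetGetRBID returns a value that increments once per completed rollback, so waiting for lastRBID + 1 confirms exactly one rollback ran. A sketch with `node` as a placeholder:

var lastRBID = assert.commandWorked(node.adminCommand('replSetGetRBID')).rbid;
// ... induce a rollback ...
assert.soon(() => {
    return assert.commandWorked(node.adminCommand('replSetGetRBID')).rbid === lastRBID + 1;
}, 'Timed out waiting for RBID to increment');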
diff --git a/jstests/replsets/libs/secondary_reads_test.js b/jstests/replsets/libs/secondary_reads_test.js
index da22f9b73b5..192421827f8 100644
--- a/jstests/replsets/libs/secondary_reads_test.js
+++ b/jstests/replsets/libs/secondary_reads_test.js
@@ -36,7 +36,6 @@ function SecondaryReadsTest(name = "secondary_reads_test") {
}
this.startSecondaryReaders = function(nReaders, readFn) {
-
let read = function() {
db.getMongo().setSlaveOk();
db = db.getSiblingDB(TestData.dbName);
@@ -70,7 +69,6 @@ function SecondaryReadsTest(name = "secondary_reads_test") {
// The returned function will return once the batch has reached the point where it has applied
// but not updated the last applied optime.
this.pauseSecondaryBatchApplication = function() {
-
clearRawMongoProgramOutput();
assert.commandWorked(
@@ -102,7 +100,7 @@ function SecondaryReadsTest(name = "secondary_reads_test") {
assert.writeOK(primaryDB.getCollection(signalColl).insert({_id: testDoneId}));
for (let i = 0; i < readers.length; i++) {
const await = readers[i];
- await();
+ await ();
print("reader " + i + " done");
}
readers = [];
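A note on the odd-looking `await ();` above: in this library `await` is an ordinary variable holding the join function returned by startParallelShell, not the ES2017 keyword, and clang-format 7 appears to treat it as the keyword and insert a space; the change is cosmetic. A sketch of the underlying join pattern, with `primary` as a placeholder:

var joinShell = startParallelShell(function() {
    db.getSiblingDB('test').coll.find().itcount();
}, primary.port);
joinShell();  // blocks until the parallel shell exits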
diff --git a/jstests/replsets/libs/tags.js b/jstests/replsets/libs/tags.js
index fab982279e5..2f52516e4b3 100644
--- a/jstests/replsets/libs/tags.js
+++ b/jstests/replsets/libs/tags.js
@@ -56,56 +56,57 @@ var TagsTest = function(options) {
protocolVersion: 1,
members: [
{
- _id: 0,
- host: nodes[0],
- tags: {
- server: '0',
- dc: 'ny',
- ny: '1',
- rack: 'ny.rk1',
- },
+ _id: 0,
+ host: nodes[0],
+ tags: {
+ server: '0',
+ dc: 'ny',
+ ny: '1',
+ rack: 'ny.rk1',
+ },
},
{
- _id: 1,
- host: nodes[1],
- tags: {
- server: '1',
- dc: 'ny',
- ny: '2',
- rack: 'ny.rk1',
- },
+ _id: 1,
+ host: nodes[1],
+ tags: {
+ server: '1',
+ dc: 'ny',
+ ny: '2',
+ rack: 'ny.rk1',
+ },
},
{
- _id: 2,
- host: nodes[2],
- tags: {
- server: '2',
- dc: 'ny',
- ny: '3',
- rack: 'ny.rk2', 2: 'this',
- },
+ _id: 2,
+ host: nodes[2],
+ tags: {
+ server: '2',
+ dc: 'ny',
+ ny: '3',
+ rack: 'ny.rk2',
+ 2: 'this',
+ },
},
{
- _id: 3,
- host: nodes[3],
- priority: 0,
- tags: {
- server: '3',
- dc: 'sf',
- sf: '1',
- rack: 'sf.rk1',
- },
+ _id: 3,
+ host: nodes[3],
+ priority: 0,
+ tags: {
+ server: '3',
+ dc: 'sf',
+ sf: '1',
+ rack: 'sf.rk1',
+ },
},
{
- _id: 4,
- host: nodes[4],
- priority: 0,
- tags: {
- server: '4',
- dc: 'sf',
- sf: '2',
- rack: 'sf.rk2',
- },
+ _id: 4,
+ host: nodes[4],
+ priority: 0,
+ tags: {
+ server: '4',
+ dc: 'sf',
+ sf: '2',
+ rack: 'sf.rk2',
+ },
},
],
settings: {
@@ -171,13 +172,12 @@ var TagsTest = function(options) {
primary.forceWriteMode(options.forceWriteMode);
}
var writeConcern = {
- writeConcern:
- {w: expectedWritableNodesCount, wtimeout: replTest.kDefaultTimeoutMS}
+ writeConcern: {w: expectedWritableNodesCount, wtimeout: replTest.kDefaultTimeoutMS}
};
assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern));
jsTestLog('ensurePrimary - Successfully written a document to primary node (' +
- replTest.nodes[nodeId].host + ') using a write concern of w:' +
- expectedWritableNodesCount);
+ replTest.nodes[nodeId].host +
+ ') using a write concern of w:' + expectedWritableNodesCount);
return primary;
};
@@ -308,5 +308,4 @@ var TagsTest = function(options) {
replTest.stopSet();
};
-
};
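The tags assigned above only matter once a write concern mode refers to them: a getLastErrorModes entry in the config's settings maps a mode name to counts of distinct tag values that must acknowledge a write. An illustrative sketch (the mode name is invented):

var settings = {
    getLastErrorModes: {
        // Require acknowledgement from members covering 2 distinct 'dc' values.
        multiDC: {dc: 2},
    },
};
// Later: coll.insert({x: 1}, {writeConcern: {w: 'multiDC', wtimeout: 30000}});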
diff --git a/jstests/replsets/libs/two_phase_drops.js b/jstests/replsets/libs/two_phase_drops.js
index 46b30cb9ed5..bb772012fdb 100644
--- a/jstests/replsets/libs/two_phase_drops.js
+++ b/jstests/replsets/libs/two_phase_drops.js
@@ -182,8 +182,8 @@ class TwoPhaseDropCollectionTest {
TwoPhaseDropCollectionTest.listCollections(db, {includePendingDrops: true});
TwoPhaseDropCollectionTest._testLog("Checking presence of drop-pending collection for " +
- collName + " in the collection list: " +
- tojson(collections));
+ collName +
+ " in the collection list: " + tojson(collections));
let pendingDropRegex = TwoPhaseDropCollectionTest.pendingDropRegex(collName);
return collections.find(c => pendingDropRegex.test(c.name));
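For context: listCollections normally hides drop-pending (two-phase drop) collections, and the includePendingDrops option used above surfaces them so they can be matched by name. A hedged sketch of the raw command form:

var res = assert.commandWorked(
    db.runCommand({listCollections: 1, includePendingDrops: true}));
res.cursor.firstBatch.forEach(c => print(c.name));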
diff --git a/jstests/replsets/linearizable_read_concern.js b/jstests/replsets/linearizable_read_concern.js
index 9b8dd68bd38..5984577a4ed 100644
--- a/jstests/replsets/linearizable_read_concern.js
+++ b/jstests/replsets/linearizable_read_concern.js
@@ -16,133 +16,133 @@ load('jstests/replsets/rslib.js');
load('jstests/libs/parallelTester.js');
load('jstests/libs/write_concern_util.js');
(function() {
- 'use strict';
- var send_linearizable_read = function() {
- // The primary will step down and throw an exception, which is expected.
- var coll = db.getSiblingDB("test").foo;
- jsTestLog('Sending in linearizable read in secondary thread');
- // 'isMaster' ensures that the following command fails (and returns a response rather than
- // an exception) before its connection is cut because of the primary step down. Refer to
- // SERVER-24574.
- assert.commandWorked(coll.runCommand({isMaster: 1, hangUpOnStepDown: false}));
- assert.commandFailedWithCode(
- coll.runCommand(
- {'find': 'foo', readConcern: {level: "linearizable"}, maxTimeMS: 60000}),
- ErrorCodes.InterruptedDueToReplStateChange);
- };
-
- var num_nodes = 3;
- var name = 'linearizable_read_concern';
- var replTest = new ReplSetTest({name: name, nodes: num_nodes, useBridge: true});
- var config = replTest.getReplSetConfig();
-
- // Increased election timeout to avoid having the primary step down while we are
- // testing linearizable functionality on an isolated primary.
- config.settings = {electionTimeoutMillis: 60000};
-
- replTest.startSet();
- replTest.initiate(config);
-
- // Without a sync source the heartbeat interval will be half of the election timeout, 30
- // seconds. It thus will take almost 30 seconds for the secondaries to set the primary as
- // their sync source and begin replicating.
- replTest.awaitReplication();
- var primary = replTest.getPrimary();
- var secondaries = replTest.getSecondaries();
-
- // Do a write to have something to read.
- assert.writeOK(primary.getDB("test").foo.insert(
- {"number": 7},
- {"writeConcern": {"w": "majority", "wtimeout": ReplSetTest.kDefaultTimeoutMS}}));
-
- jsTestLog("Testing linearizable readConcern parsing");
- // This command is sent to the primary, and the primary is fully connected so it should work.
- var goodRead = assert.writeOK(primary.getDB("test").runCommand(
- {'find': 'foo', readConcern: {level: "linearizable"}, "maxTimeMS": 60000}));
- assert.eq(goodRead.cursor.firstBatch[0].number, 7);
-
- // This fails because you cannot have a linearizable read command sent to a secondary.
- var badCmd = assert.commandFailed(secondaries[0].getDB("test").runCommand(
- {"find": "foo", readConcern: {level: "linearizable"}, "maxTimeMS": 60000}));
-
- assert.eq(badCmd.errmsg, "cannot satisfy linearizable read concern on non-primary node");
- assert.eq(badCmd.code, ErrorCodes.NotMaster);
-
- // This fails because you cannot specify 'afterOpTime' for linearizable read.
- var opTimeCmd = assert.commandFailed(primary.getDB("test").runCommand({
- "find": "foo",
- readConcern: {level: "linearizable", "afterOpTime": {ts: Timestamp(1, 2), t: 1}},
- "maxTimeMS": 60000
- }));
- assert.eq(opTimeCmd.errmsg, "afterOpTime not compatible with linearizable read concern");
- assert.eq(opTimeCmd.code, ErrorCodes.FailedToParse);
-
- // A $out aggregation is not allowed with readConcern level "linearizable".
- let outResult = assert.throws(() => primary.getDB("test").foo.aggregate(
- [{$out: "out"}], {readConcern: {level: "linearizable"}}));
- assert.eq(outResult.code, ErrorCodes.InvalidOptions);
-
- // A $merge aggregation is not allowed with readConcern level "linearizable".
- let mergeResult = assert.throws(
- () => primary.getDB("test").foo.aggregate(
- [{$merge: {into: "out", whenMatched: "replace", whenNotMatched: "insert"}}],
- {readConcern: {level: "linearizable"}}));
- assert.eq(mergeResult.code, ErrorCodes.InvalidOptions);
-
- primary = replTest.getPrimary();
-
- jsTestLog("Starting linearizablility testing");
-
- const cursorId = assert
- .commandWorked(primary.getDB("test").runCommand(
- {'find': 'foo', readConcern: {level: "linearizable"}, batchSize: 0}))
- .cursor.id;
- jsTestLog(
- "Setting up partitions such that the primary is isolated: [Secondary-Secondary] [Primary]");
- secondaries[0].disconnect(primary);
- secondaries[1].disconnect(primary);
-
- jsTestLog(
- "Testing to make sure that linearizable getMores will time out when the primary is isolated.");
- assert.commandWorked(primary.getDB("test").foo.insert({_id: 0, x: 0}));
+'use strict';
+var send_linearizable_read = function() {
+ // The primary will step down and throw an exception, which is expected.
+ var coll = db.getSiblingDB("test").foo;
+ jsTestLog('Sending in linearizable read in secondary thread');
+ // 'isMaster' ensures that the following command fails (and returns a response rather than
+ // an exception) before its connection is cut because of the primary step down. Refer to
+ // SERVER-24574.
+ assert.commandWorked(coll.runCommand({isMaster: 1, hangUpOnStepDown: false}));
assert.commandFailedWithCode(
- primary.getDB("test").runCommand({"getMore": cursorId, collection: "foo", batchSize: 1}),
- ErrorCodes.LinearizableReadConcernError);
-
- jsTestLog("Test that a linearizable read will timeout when the primary is isolated.");
- let findResult = primary.getDB("test").runCommand(
- {"find": "foo", "readConcern": {level: "linearizable"}, "maxTimeMS": 3000});
- assert.commandFailedWithCode(findResult, ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Testing to make sure linearizable read command does not block forever.");
-
- // Get last noop Optime before sending the linearizable read command
- // to ensure that we are waiting for the most recent noop write.
- var lastOpTimestamp = getLatestOp(primary).ts;
-
- var parallelShell = startParallelShell(send_linearizable_read, primary.port);
- // Sending a linearizable read implicitly replicates a noop to the secondaries. We need to find
- // the most recently issued noop to ensure that we call stepdown during the recently
- // issued linearizable read and not before the read (in the separate thread) has been called.
- jsTestLog("Checking end of oplog for noop");
- assert.soon(function() {
- var isEarlierTimestamp = function(ts1, ts2) {
- if (ts1.getTime() == ts2.getTime()) {
- return ts1.getInc() < ts2.getInc();
- }
- return ts1.getTime() < ts2.getTime();
- };
- var latestOp = getLatestOp(primary);
- if (latestOp.op == "n" && isEarlierTimestamp(lastOpTimestamp, latestOp.ts)) {
- return true;
+ coll.runCommand({'find': 'foo', readConcern: {level: "linearizable"}, maxTimeMS: 60000}),
+ ErrorCodes.InterruptedDueToReplStateChange);
+};
+
+var num_nodes = 3;
+var name = 'linearizable_read_concern';
+var replTest = new ReplSetTest({name: name, nodes: num_nodes, useBridge: true});
+var config = replTest.getReplSetConfig();
+
+// Increased election timeout to avoid having the primary step down while we are
+// testing linearizable functionality on an isolated primary.
+config.settings = {
+ electionTimeoutMillis: 60000
+};
+
+replTest.startSet();
+replTest.initiate(config);
+
+// Without a sync source the heartbeat interval will be half of the election timeout, 30
+// seconds. It thus will take almost 30 seconds for the secondaries to set the primary as
+// their sync source and begin replicating.
+replTest.awaitReplication();
+var primary = replTest.getPrimary();
+var secondaries = replTest.getSecondaries();
+
+// Do a write to have something to read.
+assert.writeOK(primary.getDB("test").foo.insert(
+ {"number": 7}, {"writeConcern": {"w": "majority", "wtimeout": ReplSetTest.kDefaultTimeoutMS}}));
+
+jsTestLog("Testing linearizable readConcern parsing");
+// This command is sent to the primary, and the primary is fully connected so it should work.
+var goodRead = assert.writeOK(primary.getDB("test").runCommand(
+ {'find': 'foo', readConcern: {level: "linearizable"}, "maxTimeMS": 60000}));
+assert.eq(goodRead.cursor.firstBatch[0].number, 7);
+
+// This fails because you cannot have a linearizable read command sent to a secondary.
+var badCmd = assert.commandFailed(secondaries[0].getDB("test").runCommand(
+ {"find": "foo", readConcern: {level: "linearizable"}, "maxTimeMS": 60000}));
+
+assert.eq(badCmd.errmsg, "cannot satisfy linearizable read concern on non-primary node");
+assert.eq(badCmd.code, ErrorCodes.NotMaster);
+
+// This fails because you cannot specify 'afterOpTime' for linearizable read.
+var opTimeCmd = assert.commandFailed(primary.getDB("test").runCommand({
+ "find": "foo",
+ readConcern: {level: "linearizable", "afterOpTime": {ts: Timestamp(1, 2), t: 1}},
+ "maxTimeMS": 60000
+}));
+assert.eq(opTimeCmd.errmsg, "afterOpTime not compatible with linearizable read concern");
+assert.eq(opTimeCmd.code, ErrorCodes.FailedToParse);
+
+// A $out aggregation is not allowed with readConcern level "linearizable".
+let outResult = assert.throws(() => primary.getDB("test").foo.aggregate(
+ [{$out: "out"}], {readConcern: {level: "linearizable"}}));
+assert.eq(outResult.code, ErrorCodes.InvalidOptions);
+
+// A $merge aggregation is not allowed with readConcern level "linearizable".
+let mergeResult =
+ assert.throws(() => primary.getDB("test").foo.aggregate(
+ [{$merge: {into: "out", whenMatched: "replace", whenNotMatched: "insert"}}],
+ {readConcern: {level: "linearizable"}}));
+assert.eq(mergeResult.code, ErrorCodes.InvalidOptions);
+
+primary = replTest.getPrimary();
+
+jsTestLog("Starting linearizablility testing");
+
+const cursorId = assert
+ .commandWorked(primary.getDB("test").runCommand(
+ {'find': 'foo', readConcern: {level: "linearizable"}, batchSize: 0}))
+ .cursor.id;
+jsTestLog(
+ "Setting up partitions such that the primary is isolated: [Secondary-Secondary] [Primary]");
+secondaries[0].disconnect(primary);
+secondaries[1].disconnect(primary);
+
+jsTestLog(
+ "Testing to make sure that linearizable getMores will time out when the primary is isolated.");
+assert.commandWorked(primary.getDB("test").foo.insert({_id: 0, x: 0}));
+assert.commandFailedWithCode(
+ primary.getDB("test").runCommand({"getMore": cursorId, collection: "foo", batchSize: 1}),
+ ErrorCodes.LinearizableReadConcernError);
+
+jsTestLog("Test that a linearizable read will timeout when the primary is isolated.");
+let findResult = primary.getDB("test").runCommand(
+ {"find": "foo", "readConcern": {level: "linearizable"}, "maxTimeMS": 3000});
+assert.commandFailedWithCode(findResult, ErrorCodes.MaxTimeMSExpired);
+
+jsTestLog("Testing to make sure linearizable read command does not block forever.");
+
+// Get last noop Optime before sending the linearizable read command
+// to ensure that we are waiting for the most recent noop write.
+var lastOpTimestamp = getLatestOp(primary).ts;
+
+var parallelShell = startParallelShell(send_linearizable_read, primary.port);
+// Sending a linearizable read implicitly replicates a noop to the secondaries. We need to find
+// the most recently issued noop to ensure that we call stepdown during the recently
+// issued linearizable read and not before the read (in the separate thread) has been called.
+jsTestLog("Checking end of oplog for noop");
+assert.soon(function() {
+ var isEarlierTimestamp = function(ts1, ts2) {
+ if (ts1.getTime() == ts2.getTime()) {
+ return ts1.getInc() < ts2.getInc();
}
-
- return false;
- });
- assert.eq(primary, replTest.getPrimary(), "Primary unexpectedly changed mid test.");
- jsTestLog("Making Primary step down");
- assert.commandWorked(primary.adminCommand(
- {"replSetStepDown": 100, secondaryCatchUpPeriodSecs: 0, "force": true}));
- parallelShell();
- replTest.stopSet();
+ return ts1.getTime() < ts2.getTime();
+ };
+ var latestOp = getLatestOp(primary);
+ if (latestOp.op == "n" && isEarlierTimestamp(lastOpTimestamp, latestOp.ts)) {
+ return true;
+ }
+
+ return false;
+});
+assert.eq(primary, replTest.getPrimary(), "Primary unexpectedly changed mid test.");
+jsTestLog("Making Primary step down");
+assert.commandWorked(
+ primary.adminCommand({"replSetStepDown": 100, secondaryCatchUpPeriodSecs: 0, "force": true}));
+parallelShell();
+replTest.stopSet();
}());
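The operation under test reduces to a single command shape: a linearizable read is only valid on the primary, rejects afterOpTime, and should carry maxTimeMS because it blocks until a no-op write majority-commits. A sketch with `primary` as a placeholder:

var res = primary.getDB('test').runCommand(
    {find: 'foo', readConcern: {level: 'linearizable'}, maxTimeMS: 60000});
assert.commandWorked(res);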
diff --git a/jstests/replsets/localhost1.js b/jstests/replsets/localhost1.js
index d44f9f6b2ba..27fa1bc6cf9 100644
--- a/jstests/replsets/localhost1.js
+++ b/jstests/replsets/localhost1.js
@@ -1,16 +1,16 @@
// Test ReplSet default initiate with localhost-only binding
(function() {
- 'use strict';
+'use strict';
- // Select localhost when binding to localhost
- const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
- const primary = rt.startSet({bind_ip: "127.0.0.1"})[0];
- const db = primary.getDB('admin');
- const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
- assert(resp.me.startsWith('127.0.0.1'), tojson(resp.me) + " does not start with 127.0.0.1:");
+// Select localhost when binding to localhost
+const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
+const primary = rt.startSet({bind_ip: "127.0.0.1"})[0];
+const db = primary.getDB('admin');
+const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
+assert(resp.me.startsWith('127.0.0.1'), tojson(resp.me) + " does not start with 127.0.0.1:");
- // Wait for the primary to complete its election before shutting down the set.
- assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
- rt.stopSet();
+// Wait for the primary to complete its election before shutting down the set.
+assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
+rt.stopSet();
})();
diff --git a/jstests/replsets/localhost2.js b/jstests/replsets/localhost2.js
index aa3655dd1a2..d8078d0abbb 100644
--- a/jstests/replsets/localhost2.js
+++ b/jstests/replsets/localhost2.js
@@ -1,20 +1,18 @@
// Test ReplSet default initiate with 0.0.0.0 binding
(function() {
- 'use strict';
+'use strict';
- // A non-localhost address should be selected when binding to 0.0.0.0
- const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
- const primary = rt.startSet({bind_ip: "0.0.0.0"})[0];
- const db = primary.getDB('admin');
- const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
- assert(!resp.me.startsWith('127.0.0.1:'),
- tojson(resp.me) + " should not start with 127.0.0.1:");
- assert(!resp.me.startsWith('0.0.0.0:'), tojson(resp.me) + " should not start with 0.0.0.0:");
- assert(!resp.me.startsWith('localhost:'),
- tojson(resp.me) + " should not start with localhost:");
+// A non-localhost address should be selected when binding to 0.0.0.0
+const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
+const primary = rt.startSet({bind_ip: "0.0.0.0"})[0];
+const db = primary.getDB('admin');
+const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
+assert(!resp.me.startsWith('127.0.0.1:'), tojson(resp.me) + " should not start with 127.0.0.1:");
+assert(!resp.me.startsWith('0.0.0.0:'), tojson(resp.me) + " should not start with 0.0.0.0:");
+assert(!resp.me.startsWith('localhost:'), tojson(resp.me) + " should not start with localhost:");
- // Wait for the primary to complete its election before shutting down the set.
- assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
- rt.stopSet();
+// Wait for the primary to complete its election before shutting down the set.
+assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
+rt.stopSet();
})();
diff --git a/jstests/replsets/localhost3.js b/jstests/replsets/localhost3.js
index 4f46505aaa6..aa452a05eef 100644
--- a/jstests/replsets/localhost3.js
+++ b/jstests/replsets/localhost3.js
@@ -1,16 +1,16 @@
// Test ReplSet default initiate with localhost-only binding
(function() {
- 'use strict';
+'use strict';
- // Select localhost when binding to localhost
- const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
- const primary = rt.startSet({bind_ip: undefined})[0];
- const db = primary.getDB('admin');
- const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
- assert(resp.me.startsWith('localhost:'), tojson(resp.me) + " should start with localhost:");
+// Select localhost when binding to localhost
+const rt = new ReplSetTest({name: "rsLocal", nodes: 1});
+const primary = rt.startSet({bind_ip: undefined})[0];
+const db = primary.getDB('admin');
+const resp = assert.commandWorked(db.adminCommand({replSetInitiate: undefined}));
+assert(resp.me.startsWith('localhost:'), tojson(resp.me) + " should start with localhost:");
- // Wait for the primary to complete its election before shutting down the set.
- assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
- rt.stopSet();
+// Wait for the primary to complete its election before shutting down the set.
+assert.soon(() => db.runCommand({ismaster: 1}).ismaster);
+rt.stopSet();
})();
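All three localhost tests above exercise the same mechanism: initiating with an undefined config makes the server generate one, and the `me` field of the reply reveals which self-address it chose for the bound interface. A sketch with `primary` as a placeholder:

var resp = assert.commandWorked(primary.adminCommand({replSetInitiate: undefined}));
print('self address chosen by the server: ' + resp.me);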
diff --git a/jstests/replsets/log_secondary_oplog_application.js b/jstests/replsets/log_secondary_oplog_application.js
index 0dbd4037068..a7b34a4f05a 100644
--- a/jstests/replsets/log_secondary_oplog_application.js
+++ b/jstests/replsets/log_secondary_oplog_application.js
@@ -6,76 +6,75 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/libs/check_log.js");
- let name = "log_secondary_oplog_application";
- let rst = ReplSetTest({name: name, nodes: 2});
- rst.startSet();
+let name = "log_secondary_oplog_application";
+let rst = ReplSetTest({name: name, nodes: 2});
+rst.startSet();
- let nodes = rst.nodeList();
- rst.initiate({
- "_id": name,
- "members": [{"_id": 0, "host": nodes[0]}, {"_id": 1, "host": nodes[1], "priority": 0}]
- });
+let nodes = rst.nodeList();
+rst.initiate({
+ "_id": name,
+ "members": [{"_id": 0, "host": nodes[0]}, {"_id": 1, "host": nodes[1], "priority": 0}]
+});
- let primary = rst.getPrimary();
- let secondary = rst.getSecondary();
+let primary = rst.getPrimary();
+let secondary = rst.getSecondary();
- /**
- * Part 1: Issue a fast op and make sure that we do *not* log it.
- * We ensure the op is always considered fast by vastly increasing the "slowMS" threshold.
- */
-
- // Create collection explicitly so the insert doesn't have to do it.
- assert.commandWorked(primary.getDB(name).createCollection("fastOp"));
- rst.awaitReplication();
+/**
+ * Part 1: Issue a fast op and make sure that we do *not* log it.
+ * We ensure the op is always considered fast by vastly increasing the "slowMS" threshold.
+ */
- // Set "slowMS" to a very high value (in milliseconds).
- assert.commandWorked(secondary.getDB(name).setProfilingLevel(1, 60 * 60 * 1000));
+// Create collection explicitly so the insert doesn't have to do it.
+assert.commandWorked(primary.getDB(name).createCollection("fastOp"));
+rst.awaitReplication();
- // Issue a write and make sure we replicate it.
- assert.commandWorked(primary.getDB(name)["fastOp"].insert({"fast": "cheetah"}));
- rst.awaitReplication();
+// Set "slowMS" to a very high value (in milliseconds).
+assert.commandWorked(secondary.getDB(name).setProfilingLevel(1, 60 * 60 * 1000));
- // The op should not have been logged.
- assert.throws(function() {
- checkLog.contains(secondary, "cheetah", 1 * 1000);
- });
+// Issue a write and make sure we replicate it.
+assert.commandWorked(primary.getDB(name)["fastOp"].insert({"fast": "cheetah"}));
+rst.awaitReplication();
- /**
- * Part 2: Issue a slow op and make sure that we *do* log it.
- * We use a failpoint in SyncTail::syncApply which blocks after we read the time at the start
- * of the application of the op, and we wait there to simulate slowness.
- */
+// The op should not have been logged.
+assert.throws(function() {
+ checkLog.contains(secondary, "cheetah", 1 * 1000);
+});
- // Create collection explicitly so the insert doesn't have to do it.
- assert.commandWorked(primary.getDB(name).createCollection("slowOp"));
- rst.awaitReplication();
+/**
+ * Part 2: Issue a slow op and make sure that we *do* log it.
+ * We use a failpoint in SyncTail::syncApply which blocks after we read the time at the start
+ * of the application of the op, and we wait there to simulate slowness.
+ */
- // Set "slowMS" to a low value (in milliseconds).
- assert.commandWorked(secondary.getDB(name).setProfilingLevel(1, 20));
+// Create collection explicitly so the insert doesn't have to do it.
+assert.commandWorked(primary.getDB(name).createCollection("slowOp"));
+rst.awaitReplication();
- // Hang right after taking note of the start time of the application.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "hangAfterRecordingOpApplicationStartTime", mode: "alwaysOn"}));
+// Set "slowMS" to a low value (in milliseconds).
+assert.commandWorked(secondary.getDB(name).setProfilingLevel(1, 20));
- // Issue a write and make sure we've hit the failpoint before moving on.
- assert.commandWorked(primary.getDB(name)["slowOp"].insert({"slow": "sloth"}));
- checkLog.contains(secondary,
- "syncApply - fail point hangAfterRecordingOpApplicationStartTime enabled");
+// Hang right after taking note of the start time of the application.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "hangAfterRecordingOpApplicationStartTime", mode: "alwaysOn"}));
- // Wait for an amount of time safely above the "slowMS" we set.
- sleep(0.5 * 1000);
+// Issue a write and make sure we've hit the failpoint before moving on.
+assert.commandWorked(primary.getDB(name)["slowOp"].insert({"slow": "sloth"}));
+checkLog.contains(secondary,
+ "syncApply - fail point hangAfterRecordingOpApplicationStartTime enabled");
- // Disable the failpoint so the op can finish applying.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "hangAfterRecordingOpApplicationStartTime", mode: "off"}));
+// Wait for an amount of time safely above the "slowMS" we set.
+sleep(0.5 * 1000);
- // Make sure we log that insert op.
- rst.awaitReplication();
- checkLog.contains(secondary, "sloth");
+// Disable the failpoint so the op can finish applying.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "hangAfterRecordingOpApplicationStartTime", mode: "off"}));
- rst.stopSet();
+// Make sure we log that insert op.
+rst.awaitReplication();
+checkLog.contains(secondary, "sloth");
+rst.stopSet();
})();
\ No newline at end of file
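
The alwaysOn/off toggle used above is only one of the modes configureFailPoint accepts; a failpoint can also disarm itself after a fixed number of activations, which avoids a matching "off" call. A minimal sketch, assuming the same secondary connection and failpoint name as in the test:

// Arm the failpoint for exactly one activation; it disarms itself afterwards.
assert.commandWorked(secondary.adminCommand({
    configureFailPoint: "hangAfterRecordingOpApplicationStartTime",
    mode: {times: 1}
}));
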
diff --git a/jstests/replsets/maintenance2.js b/jstests/replsets/maintenance2.js
index 8dd17e61a37..a2d2c3f7674 100644
--- a/jstests/replsets/maintenance2.js
+++ b/jstests/replsets/maintenance2.js
@@ -1,49 +1,49 @@
// Test that certain operations fail in recovery mode.
(function() {
- "use strict";
+"use strict";
- // Replica set testing API
- // Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
+// Replica set testing API
+// Create a new replica set test. Specify set name and the number of nodes you want.
+var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
- // call startSet() to start each mongod in the replica set
- // this returns a list of nodes
- var nodes = replTest.startSet();
+// call startSet() to start each mongod in the replica set
+// this returns a list of nodes
+var nodes = replTest.startSet();
- // Call initiate() to send the replSetInitiate command
- // This will wait for initiation
- replTest.initiate();
+// Call initiate() to send the replSetInitiate command
+// This will wait for initiation
+replTest.initiate();
- // Call getPrimary to return a reference to the node that's been
- // elected master.
- var master = replTest.getPrimary();
+// Call getPrimary to return a reference to the node that's been
+// elected master.
+var master = replTest.getPrimary();
- // save some records
- var len = 100;
- for (var i = 0; i < len; ++i) {
- master.getDB("foo").foo.save({a: i});
- }
+// save some records
+var len = 100;
+for (var i = 0; i < len; ++i) {
+ master.getDB("foo").foo.save({a: i});
+}
- // This method will check the oplogs of the master
- // and slaves in the set and wait until the change has replicated.
- // replTest.awaitReplication();
+// This method will check the oplogs of the master
+// and slaves in the set and wait until the change has replicated.
+// replTest.awaitReplication();
- var slaves = replTest._slaves;
- assert.eq(2, slaves.length, "Expected 2 slaves but length was " + slaves.length);
+var slaves = replTest._slaves;
+assert.eq(2, slaves.length, "Expected 2 slaves but length was " + slaves.length);
- slaves.forEach(function(slave) {
- // put slave into maintenance (recovery) mode
- slave.getDB("foo").adminCommand({replSetMaintenance: 1});
+slaves.forEach(function(slave) {
+ // put slave into maintenance (recovery) mode
+ slave.getDB("foo").adminCommand({replSetMaintenance: 1});
- var stats = slave.getDB("foo").adminCommand({replSetGetStatus: 1});
- assert.eq(stats.myState, 3, "Slave should be in recovering state.");
+ var stats = slave.getDB("foo").adminCommand({replSetGetStatus: 1});
+ assert.eq(stats.myState, 3, "Slave should be in recovering state.");
- print("count should fail in recovering state...");
- slave.slaveOk = true;
- assert.commandFailed(slave.getDB("foo").runCommand({count: "foo"}));
- });
+ print("count should fail in recovering state...");
+ slave.slaveOk = true;
+ assert.commandFailed(slave.getDB("foo").runCommand({count: "foo"}));
+});
- // Shut down the set and finish the test.
- replTest.stopSet();
+// Shut down the set and finish the test.
+replTest.stopSet();
}());
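
replSetMaintenance is symmetric: passing 0 takes a node back out of RECOVERING once the assertions are done, which the test above skips only because it shuts the set down immediately. A minimal sketch of the round trip, assuming conn is a direct connection to a secondary:

assert.commandWorked(conn.adminCommand({replSetMaintenance: 1}));  // enter RECOVERING (myState 3)
assert.eq(3, conn.adminCommand({replSetGetStatus: 1}).myState);
assert.commandWorked(conn.adminCommand({replSetMaintenance: 0}));  // return to SECONDARY
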
diff --git a/jstests/replsets/majority_writes_wait_for_all_durable_timestamp.js b/jstests/replsets/majority_writes_wait_for_all_durable_timestamp.js
index 87d5d5c8b29..5d0d962cda8 100644
--- a/jstests/replsets/majority_writes_wait_for_all_durable_timestamp.js
+++ b/jstests/replsets/majority_writes_wait_for_all_durable_timestamp.js
@@ -6,58 +6,56 @@
* @tags: [requires_document_locking]
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- function assertWriteConcernTimeout(result) {
- assert.writeErrorWithCode(result, ErrorCodes.WriteConcernFailed);
- assert(result.hasWriteConcernError(), tojson(result));
- assert(result.getWriteConcernError().errInfo.wtimeout, tojson(result));
- }
-
- const rst = new ReplSetTest({name: "writes_wait_for_all_durable", nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const dbName = "test";
- const collName = "majority_writes_wait_for_all_durable";
- const testDB = primary.getDB(dbName);
- const testColl = testDB[collName];
-
- TestData.dbName = dbName;
- TestData.collName = collName;
-
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
-
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "hangAfterCollectionInserts",
- mode: "alwaysOn",
- data: {collectionNS: testColl.getFullName(), first_id: "b"}
- }));
-
- jsTestLog(
- "Insert a document to hang before the insert completes to hold back the all durable timestamp.");
- const joinHungWrite = startParallelShell(() => {
- assert.commandWorked(
- db.getSiblingDB(TestData.dbName)[TestData.collName].insert({_id: "b"}));
- }, primary.port);
- jsTestLog("Checking that the log contains fail point enabled.");
- checkLog.contains(
- testDB.getMongo(),
- "hangAfterCollectionInserts fail point enabled for " + testColl.getFullName());
-
- try {
- jsTest.log("Do a write with majority write concern that should time out.");
- assertWriteConcernTimeout(
- testColl.insert({_id: 0}, {writeConcern: {w: "majority", wtimeout: 2 * 1000}}));
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'hangAfterCollectionInserts', mode: 'off'}));
- }
-
- joinHungWrite();
- rst.stopSet();
+"use strict";
+
+load("jstests/libs/check_log.js");
+
+function assertWriteConcernTimeout(result) {
+ assert.writeErrorWithCode(result, ErrorCodes.WriteConcernFailed);
+ assert(result.hasWriteConcernError(), tojson(result));
+ assert(result.getWriteConcernError().errInfo.wtimeout, tojson(result));
+}
+
+const rst = new ReplSetTest({name: "writes_wait_for_all_durable", nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const dbName = "test";
+const collName = "majority_writes_wait_for_all_durable";
+const testDB = primary.getDB(dbName);
+const testColl = testDB[collName];
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "hangAfterCollectionInserts",
+ mode: "alwaysOn",
+ data: {collectionNS: testColl.getFullName(), first_id: "b"}
+}));
+
+jsTestLog(
+ "Insert a document to hang before the insert completes to hold back the all durable timestamp.");
+const joinHungWrite = startParallelShell(() => {
+ assert.commandWorked(db.getSiblingDB(TestData.dbName)[TestData.collName].insert({_id: "b"}));
+}, primary.port);
+jsTestLog("Checking that the log contains fail point enabled.");
+checkLog.contains(testDB.getMongo(),
+ "hangAfterCollectionInserts fail point enabled for " + testColl.getFullName());
+
+try {
+ jsTest.log("Do a write with majority write concern that should time out.");
+ assertWriteConcernTimeout(
+ testColl.insert({_id: 0}, {writeConcern: {w: "majority", wtimeout: 2 * 1000}}));
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'hangAfterCollectionInserts', mode: 'off'}));
+}
+
+joinHungWrite();
+rst.stopSet();
})();
\ No newline at end of file
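
The assertWriteConcernTimeout helper above keys off the stable parts of a write-concern error: the WriteConcernFailed code and the errInfo.wtimeout flag that distinguishes a timeout from a hard failure. A minimal inline sketch of the same check, assuming testColl as set up in the test:

const res = testColl.insert({_id: 0}, {writeConcern: {w: "majority", wtimeout: 2 * 1000}});
assert(res.hasWriteConcernError(), tojson(res));
const wce = res.getWriteConcernError();
assert.eq(ErrorCodes.WriteConcernFailed, wce.code, tojson(wce));
assert(wce.errInfo.wtimeout, tojson(wce));  // timed out rather than failed outright
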
diff --git a/jstests/replsets/maxSyncSourceLagSecs.js b/jstests/replsets/maxSyncSourceLagSecs.js
index 3663972d1fa..2e71d43dd70 100644
--- a/jstests/replsets/maxSyncSourceLagSecs.js
+++ b/jstests/replsets/maxSyncSourceLagSecs.js
@@ -3,54 +3,54 @@
// This test requires the fsync command to ensure members experience a delay.
// @tags: [requires_fsync]
(function() {
- "use strict";
- load("jstests/replsets/rslib.js");
-
- var name = "maxSyncSourceLagSecs";
- var replTest = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {priority: 3}},
- {rsConfig: {priority: 0}},
- {rsConfig: {priority: 0}, setParameter: 'maxSyncSourceLagSecs=3'},
- ],
- oplogSize: 5,
- });
- var nodes = replTest.nodeList();
- replTest.startSet();
- replTest.initiate();
- replTest.awaitNodesAgreeOnPrimary();
-
- var master = replTest.getPrimary();
- var slaves = replTest._slaves;
- syncFrom(slaves[0], master, replTest);
- syncFrom(slaves[1], master, replTest);
- master.getDB("foo").bar.save({a: 1});
- replTest.awaitReplication();
-
- jsTestLog("Setting sync target of slave 2 to slave 1");
- syncFrom(slaves[1], slaves[0], replTest);
- printjson(replTest.status());
-
- // need to put at least maxSyncSourceLagSecs b/w first op and subsequent ops
- // so that the shouldChangeSyncSource logic goes into effect
- sleep(4000);
-
- jsTestLog("Lock slave 1 and add some docs. Force sync target for slave 2 to change to primary");
- assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1}));
-
- assert.soon(function() {
- master.getDB("foo").bar.insert({a: 2});
- var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1});
- return res.syncingTo === master.name;
- }, "sync target not changed back to primary", 100 * 1000, 2 * 1000);
- printjson(replTest.status());
-
- assert.soon(function() {
- return (slaves[1].getDB("foo").bar.count({a: 1}) > 0 &&
- slaves[1].getDB("foo").bar.count({a: 2}) > 0);
- }, "slave should have caught up after syncing to primary.");
-
- assert.commandWorked(slaves[0].getDB("admin").fsyncUnlock());
- replTest.stopSet();
+"use strict";
+load("jstests/replsets/rslib.js");
+
+var name = "maxSyncSourceLagSecs";
+var replTest = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {priority: 3}},
+ {rsConfig: {priority: 0}},
+ {rsConfig: {priority: 0}, setParameter: 'maxSyncSourceLagSecs=3'},
+ ],
+ oplogSize: 5,
+});
+var nodes = replTest.nodeList();
+replTest.startSet();
+replTest.initiate();
+replTest.awaitNodesAgreeOnPrimary();
+
+var master = replTest.getPrimary();
+var slaves = replTest._slaves;
+syncFrom(slaves[0], master, replTest);
+syncFrom(slaves[1], master, replTest);
+master.getDB("foo").bar.save({a: 1});
+replTest.awaitReplication();
+
+jsTestLog("Setting sync target of slave 2 to slave 1");
+syncFrom(slaves[1], slaves[0], replTest);
+printjson(replTest.status());
+
+// We need to put at least maxSyncSourceLagSecs between the first op and subsequent ops
+// so that the shouldChangeSyncSource logic goes into effect.
+sleep(4000);
+
+jsTestLog("Lock slave 1 and add some docs. Force sync target for slave 2 to change to primary");
+assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1}));
+
+assert.soon(function() {
+ master.getDB("foo").bar.insert({a: 2});
+ var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1});
+ return res.syncingTo === master.name;
+}, "sync target not changed back to primary", 100 * 1000, 2 * 1000);
+printjson(replTest.status());
+
+assert.soon(function() {
+ return (slaves[1].getDB("foo").bar.count({a: 1}) > 0 &&
+ slaves[1].getDB("foo").bar.count({a: 2}) > 0);
+}, "slave should have caught up after syncing to primary.");
+
+assert.commandWorked(slaves[0].getDB("admin").fsyncUnlock());
+replTest.stopSet();
}());
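
The fsync lock is what creates the lag above: a locked node stops applying oplog entries until it is unlocked, so its replication lag grows past maxSyncSourceLagSecs and slave 2 abandons it as a sync source. A minimal sketch of the lock/unlock pair, assuming node is a direct connection to the member being frozen:

assert.commandWorked(node.getDB("admin").runCommand({fsync: 1, lock: 1}));
// ... primary writes accumulate while this node falls behind ...
assert.commandWorked(node.getDB("admin").fsyncUnlock());
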
diff --git a/jstests/replsets/minimum_visible_with_cluster_time.js b/jstests/replsets/minimum_visible_with_cluster_time.js
index 7a30c386f73..4b9dc7aa5f2 100644
--- a/jstests/replsets/minimum_visible_with_cluster_time.js
+++ b/jstests/replsets/minimum_visible_with_cluster_time.js
@@ -5,105 +5,103 @@
* @tags: [requires_replication]
*/
(function() {
- 'use strict';
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
-
- const rst = new ReplSetTest({nodes: 1});
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- rst.stopSet();
- return;
- }
+'use strict';
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+
+const rst = new ReplSetTest({nodes: 1});
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ rst.stopSet();
+ return;
+}
- rst.initiate();
- const primary = rst.getPrimary();
+rst.initiate();
+const primary = rst.getPrimary();
+const syncName = 'sync';
+const syncColl = primary.getDB(syncName).getCollection(syncName);
+assert.commandWorked(syncColl.insert({t: 'before'}));
+
+function bumpClusterTime() {
+ jsTestLog('Beginning to bump the logical clock.');
const syncName = 'sync';
- const syncColl = primary.getDB(syncName).getCollection(syncName);
- assert.commandWorked(syncColl.insert({t: 'before'}));
-
- function bumpClusterTime() {
- jsTestLog('Beginning to bump the logical clock.');
- const syncName = 'sync';
- const syncColl = db.getSiblingDB(syncName).getCollection(syncName);
- assert.eq(syncColl.find().itcount(), 1);
- assert.commandWorked(syncColl.insert({t: 'during'}));
- assert.eq(syncColl.find().itcount(), 2);
-
- let clusterTime = new Timestamp(1, 1);
- while (true) {
- const higherClusterTime = new Timestamp(clusterTime.getTime() + 20, 1);
- const res = assert.commandWorked(db.adminCommand({
- 'isMaster': 1,
- '$clusterTime': {
- 'clusterTime': higherClusterTime,
- 'signature': {
- 'hash': BinData(0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAA='),
- 'keyId': NumberLong(0)
- }
- }
- }));
- clusterTime = res.$clusterTime.clusterTime;
-
- if (syncColl.find().itcount() === 3) {
- jsTestLog('Done bumping the logical clock.');
- return;
+ const syncColl = db.getSiblingDB(syncName).getCollection(syncName);
+ assert.eq(syncColl.find().itcount(), 1);
+ assert.commandWorked(syncColl.insert({t: 'during'}));
+ assert.eq(syncColl.find().itcount(), 2);
+
+ let clusterTime = new Timestamp(1, 1);
+ while (true) {
+ const higherClusterTime = new Timestamp(clusterTime.getTime() + 20, 1);
+ const res = assert.commandWorked(db.adminCommand({
+ 'isMaster': 1,
+ '$clusterTime': {
+ 'clusterTime': higherClusterTime,
+ 'signature':
+ {'hash': BinData(0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAA='), 'keyId': NumberLong(0)}
}
+ }));
+ clusterTime = res.$clusterTime.clusterTime;
+
+ if (syncColl.find().itcount() === 3) {
+ jsTestLog('Done bumping the logical clock.');
+ return;
}
}
+}
- const clusterTimeBumper = startParallelShell(bumpClusterTime, primary.port);
- // Wait for the logical clock to begin to be bumped.
- assert.soon(() => syncColl.find().itcount() === 2);
+const clusterTimeBumper = startParallelShell(bumpClusterTime, primary.port);
+// Wait for the logical clock to begin to be bumped.
+assert.soon(() => syncColl.find().itcount() === 2);
- function doMajorityRead(coll, expectedCount) {
- const res = assert.commandWorked(coll.runCommand('find', {
- 'filter': {x: 7},
- 'readConcern': {'level': 'majority'},
- 'maxTimeMS': rst.kDefaultTimeoutMS
- }));
- // Exhaust the cursor to avoid leaking cursors on the server.
- assert.eq(expectedCount, new DBCommandCursor(coll.getDB(), res).itcount());
- }
+function doMajorityRead(coll, expectedCount) {
+ const res = assert.commandWorked(coll.runCommand('find', {
+ 'filter': {x: 7},
+ 'readConcern': {'level': 'majority'},
+ 'maxTimeMS': rst.kDefaultTimeoutMS
+ }));
+ // Exhaust the cursor to avoid leaking cursors on the server.
+ assert.eq(expectedCount, new DBCommandCursor(coll.getDB(), res).itcount());
+}
- const dbName = 'minimum_visible_with_cluster_time';
- const collName = 'foo';
+const dbName = 'minimum_visible_with_cluster_time';
+const collName = 'foo';
- for (let i = 0; i < 10; i++) {
- const collNameI = collName + i;
- jsTestLog(`Testing ${dbName}.${collNameI}`);
+for (let i = 0; i < 10; i++) {
+ const collNameI = collName + i;
+ jsTestLog(`Testing ${dbName}.${collNameI}`);
- assert.commandWorked(primary.getDB(dbName).createCollection(collNameI));
- let coll = primary.getDB(dbName).getCollection(collNameI);
+ assert.commandWorked(primary.getDB(dbName).createCollection(collNameI));
+ let coll = primary.getDB(dbName).getCollection(collNameI);
- doMajorityRead(coll, 0);
+ doMajorityRead(coll, 0);
- assert.commandWorked(coll.insert({x: 7, y: 1}));
- assert.commandWorked(
- coll.createIndex({x: 1}, {'name': 'x_1', 'expireAfterSeconds': 60 * 60 * 23}));
+ assert.commandWorked(coll.insert({x: 7, y: 1}));
+ assert.commandWorked(
+ coll.createIndex({x: 1}, {'name': 'x_1', 'expireAfterSeconds': 60 * 60 * 23}));
- doMajorityRead(coll, 1);
+ doMajorityRead(coll, 1);
- assert.commandWorked(coll.insert({x: 7, y: 2}));
- assert.commandWorked(coll.runCommand(
- 'collMod', {'index': {'keyPattern': {x: 1}, 'expireAfterSeconds': 60 * 60 * 24}}));
- doMajorityRead(coll, 2);
+ assert.commandWorked(coll.insert({x: 7, y: 2}));
+ assert.commandWorked(coll.runCommand(
+ 'collMod', {'index': {'keyPattern': {x: 1}, 'expireAfterSeconds': 60 * 60 * 24}}));
+ doMajorityRead(coll, 2);
- assert.commandWorked(coll.insert({x: 7, y: 3}));
- assert.commandWorked(coll.dropIndexes());
+ assert.commandWorked(coll.insert({x: 7, y: 3}));
+ assert.commandWorked(coll.dropIndexes());
- doMajorityRead(coll, 3);
+ doMajorityRead(coll, 3);
- assert.commandWorked(coll.insert({x: 7, y: 4}));
- const newCollNameI = collNameI + '_new';
- assert.commandWorked(coll.renameCollection(newCollNameI));
+ assert.commandWorked(coll.insert({x: 7, y: 4}));
+ const newCollNameI = collNameI + '_new';
+ assert.commandWorked(coll.renameCollection(newCollNameI));
- coll = primary.getDB(dbName).getCollection(newCollNameI);
- doMajorityRead(coll, 4);
- }
+ coll = primary.getDB(dbName).getCollection(newCollNameI);
+ doMajorityRead(coll, 4);
+}
- jsTestLog('Waiting for logical clock thread to stop.');
- assert.commandWorked(syncColl.insert({t: 'after'}));
- clusterTimeBumper();
+jsTestLog('Waiting for logical clock thread to stop.');
+assert.commandWorked(syncColl.insert({t: 'after'}));
+clusterTimeBumper();
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
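
The bump loop above works because a node fast-forwards its logical clock to any valid $clusterTime gossiped on a command, and with auth disabled (as in this test) the all-zero signature is accepted. A minimal sketch of one gossip round; the target value is a hypothetical Timestamp ahead of the node's current clock:

const target = new Timestamp(1000, 1);  // hypothetical value ahead of the node's clock
const res = assert.commandWorked(db.adminCommand({
    isMaster: 1,
    $clusterTime: {
        clusterTime: target,
        signature: {hash: BinData(0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAA='), keyId: NumberLong(0)}
    }
}));
// res.$clusterTime.clusterTime now reflects the advanced clock.
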
diff --git a/jstests/replsets/mr_nonrepl_coll_in_local_db.js b/jstests/replsets/mr_nonrepl_coll_in_local_db.js
index 2b6cd66617e..8348b65e09a 100644
--- a/jstests/replsets/mr_nonrepl_coll_in_local_db.js
+++ b/jstests/replsets/mr_nonrepl_coll_in_local_db.js
@@ -7,77 +7,77 @@
// all collections created, and checking the oplog for entries logging the creation of each of those
// collections.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- const name = "mr_nonrepl_coll_in_local_db";
- const replSet = new ReplSetTest({name: name, nodes: 2});
- replSet.startSet();
- replSet.initiate();
+const name = "mr_nonrepl_coll_in_local_db";
+const replSet = new ReplSetTest({name: name, nodes: 2});
+replSet.startSet();
+replSet.initiate();
- const dbName = name;
- const collName = "test";
+const dbName = name;
+const collName = "test";
- const primary = replSet.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const coll = primaryDB[collName];
+const primary = replSet.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const coll = primaryDB[collName];
- // Insert 1000 documents in the "test" collection.
- const bulk = coll.initializeUnorderedBulkOp();
- for (let i = 0; i < 1000; i++) {
- const array = Array.from({length: 10000}, _ => Math.floor(Math.random() * 100));
- bulk.insert({arr: array});
- }
- assert.writeOK(bulk.execute());
+// Insert 1000 documents in the "test" collection.
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 0; i < 1000; i++) {
+ const array = Array.from({length: 10000}, _ => Math.floor(Math.random() * 100));
+ bulk.insert({arr: array});
+}
+assert.writeOK(bulk.execute());
- // Run a simple map-reduce.
- const result = coll.mapReduce(
- function map() {
- return this.arr.forEach(element => emit(element, 1));
- },
- function reduce(key, values) {
- return Array.sum(values);
- },
- {query: {arr: {$exists: true}}, out: "mr_result"});
- assert.commandWorked(result);
+// Run a simple map-reduce.
+const result = coll.mapReduce(
+ function map() {
+ return this.arr.forEach(element => emit(element, 1));
+ },
+ function reduce(key, values) {
+ return Array.sum(values);
+ },
+ {query: {arr: {$exists: true}}, out: "mr_result"});
+assert.commandWorked(result);
- // Examine the logs to find a list of created collections.
- const logLines = checkLog.getGlobalLog(primaryDB);
- let createdCollections = [];
- logLines.forEach(function(line) {
- let matchResult = line.match(/createCollection: (.+) with/);
- if (matchResult) {
- createdCollections.push(matchResult[1]);
- }
- });
+// Examine the logs to find a list of created collections.
+const logLines = checkLog.getGlobalLog(primaryDB);
+let createdCollections = [];
+logLines.forEach(function(line) {
+ let matchResult = line.match(/createCollection: (.+) with/);
+ if (matchResult) {
+ createdCollections.push(matchResult[1]);
+ }
+});
- createdCollections.forEach(function(createdCollectionName) {
- if (createdCollectionName.startsWith("admin.")) {
- // Although the "admin.system.version" collection is replicated, no "c" entry gets
- // created for it in the oplog, so this test would see it as unreplicated. In general,
- // this test is not concerned with the "admin" database, so we don't examine any "admin"
- // collections.
- return;
- }
+createdCollections.forEach(function(createdCollectionName) {
+ if (createdCollectionName.startsWith("admin.")) {
+ // Although the "admin.system.version" collection is replicated, no "c" entry gets
+ // created for it in the oplog, so this test would see it as unreplicated. In general,
+ // this test is not concerned with the "admin" database, so we don't examine any "admin"
+ // collections.
+ return;
+ }
- // Search for a log entry for the creation of this collection.
- const oplogEntries = primaryDB.getSiblingDB("local")["oplog.rs"]
- .find({op: "c", "o.idIndex.ns": createdCollectionName})
- .toArray();
- if (createdCollectionName.startsWith("local.")) {
- // We do not want to see any replication of "local" collections.
- assert.eq(oplogEntries.length,
- 0,
- "Found unexpected oplog entry for creation of " + createdCollectionName +
- ": " + tojson(oplogEntries));
- } else {
- assert.eq(oplogEntries.length,
- 1,
- "Found no oplog entry or too many entries for creation of " +
- createdCollectionName + ": " + tojson(oplogEntries));
- }
- });
+ // Search for a log entry for the creation of this collection.
+ const oplogEntries = primaryDB.getSiblingDB("local")["oplog.rs"]
+ .find({op: "c", "o.idIndex.ns": createdCollectionName})
+ .toArray();
+ if (createdCollectionName.startsWith("local.")) {
+ // We do not want to see any replication of "local" collections.
+ assert.eq(oplogEntries.length,
+ 0,
+ "Found unexpected oplog entry for creation of " + createdCollectionName + ": " +
+ tojson(oplogEntries));
+ } else {
+ assert.eq(oplogEntries.length,
+ 1,
+ "Found no oplog entry or too many entries for creation of " +
+ createdCollectionName + ": " + tojson(oplogEntries));
+ }
+});
- replSet.stopSet();
+replSet.stopSet();
}());
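
The oplog probe above relies on collection creation being replicated as an op: "c" entry whose o.idIndex.ns names the new collection. A minimal sketch of the same query for a single namespace, assuming primaryDB as in the test; the namespace string is hypothetical:

const entries = primaryDB.getSiblingDB("local")["oplog.rs"]
                    .find({op: "c", "o.idIndex.ns": "somedb.somecoll"})  // hypothetical ns
                    .toArray();
assert.eq(1, entries.length, tojson(entries));
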
diff --git a/jstests/replsets/multikey_write_avoids_prepare_conflict.js b/jstests/replsets/multikey_write_avoids_prepare_conflict.js
index 46d58e0b6ca..9b760039e2f 100644
--- a/jstests/replsets/multikey_write_avoids_prepare_conflict.js
+++ b/jstests/replsets/multikey_write_avoids_prepare_conflict.js
@@ -7,52 +7,52 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({name: 'multikey_write_avoids_prepare_conflict', nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- const dbName = "test";
- const collName = "coll";
-
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
- const primaryColl = primary.getDB(dbName)[collName];
-
- jsTestLog("Creating a collection and an index on the primary, with spec {x:1}.");
- assert.commandWorked(primaryColl.createIndex({x: 1}));
- replTest.awaitReplication();
-
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- jsTestLog("Preparing a transaction on primary that should set the multikey flag.");
- session.startTransaction();
- // This write should update the multikey flag in the catalog but we don't want it to generate
- // prepare conflicts. In general, it is always safe to set an index as multikey earlier than is
- // necessary.
- assert.commandWorked(sessionColl.insert({x: [1, 2]}));
- PrepareHelpers.prepareTransaction(session);
-
- jsTestLog("Switching primaries by stepping up node " + secondary);
- replTest.stepUp(secondary);
- const newPrimary = replTest.getPrimary();
- const newPrimaryColl = newPrimary.getDB(dbName)[collName];
-
- jsTestLog("Doing an insert on the new primary that should also try to set the multikey flag.");
- assert.commandWorked(newPrimaryColl.insert({x: [3, 4]}));
- replTest.awaitReplication();
-
- jsTestLog("Aborting the prepared transaction on session " + tojson(session.getSessionId()));
- assert.commandWorked(newPrimary.adminCommand({
- abortTransaction: 1,
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- autocommit: false
- }));
-
- replTest.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({name: 'multikey_write_avoids_prepare_conflict', nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+const dbName = "test";
+const collName = "coll";
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+const primaryColl = primary.getDB(dbName)[collName];
+
+jsTestLog("Creating a collection and an index on the primary, with spec {x:1}.");
+assert.commandWorked(primaryColl.createIndex({x: 1}));
+replTest.awaitReplication();
+
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+jsTestLog("Preparing a transaction on primary that should set the multikey flag.");
+session.startTransaction();
+// This write should update the multikey flag in the catalog but we don't want it to generate
+// prepare conflicts. In general, it is always safe to set an index as multikey earlier than is
+// necessary.
+assert.commandWorked(sessionColl.insert({x: [1, 2]}));
+PrepareHelpers.prepareTransaction(session);
+
+jsTestLog("Switching primaries by stepping up node " + secondary);
+replTest.stepUp(secondary);
+const newPrimary = replTest.getPrimary();
+const newPrimaryColl = newPrimary.getDB(dbName)[collName];
+
+jsTestLog("Doing an insert on the new primary that should also try to set the multikey flag.");
+assert.commandWorked(newPrimaryColl.insert({x: [3, 4]}));
+replTest.awaitReplication();
+
+jsTestLog("Aborting the prepared transaction on session " + tojson(session.getSessionId()));
+assert.commandWorked(newPrimary.adminCommand({
+ abortTransaction: 1,
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+ autocommit: false
+}));
+
+replTest.stopSet();
}());
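
PrepareHelpers.prepareTransaction, used above, runs the prepareTransaction command on the session's active transaction and returns the prepare timestamp a later commit would need. A hedged sketch of the prepare/abort round trip on the same dbName and collName as the test:

load("jstests/core/txns/libs/prepare_helpers.js");
const session = primary.startSession();
session.startTransaction();
assert.commandWorked(session.getDatabase("test")["coll"].insert({x: [1, 2]}));
const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
assert.commandWorked(session.abortTransaction_forTesting());
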
diff --git a/jstests/replsets/nested_apply_ops_create_indexes.js b/jstests/replsets/nested_apply_ops_create_indexes.js
index 1349d706c37..1a4a3f9c3ea 100644
--- a/jstests/replsets/nested_apply_ops_create_indexes.js
+++ b/jstests/replsets/nested_apply_ops_create_indexes.js
@@ -2,61 +2,56 @@
* Test createIndexes while recursively locked in a nested applyOps.
*/
(function() {
- "use strict";
+"use strict";
- let ensureIndexExists = function(testDB, collName, indexName, expectedNumIndexes) {
- let cmd = {listIndexes: collName};
- let res = testDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- let indexes = testDB[collName].getIndexes();
-
- assert.eq(indexes.length, expectedNumIndexes);
-
- let foundIndex = indexes.some(index => index.name === indexName);
- assert(foundIndex,
- "did not find the index '" + indexName + "' amongst the collection indexes: " +
- tojson(indexes));
- };
-
- let rst = new ReplSetTest({nodes: 3});
- rst.startSet();
- rst.initiate();
-
- let collName = "col";
- let dbName = "nested_apply_ops_create_indexes";
-
- let primaryTestDB = rst.getPrimary().getDB(dbName);
- let cmd = {"create": collName};
- let res = primaryTestDB.runCommand(cmd);
- assert.commandWorked(res, "could not run " + tojson(cmd));
- rst.awaitReplication();
-
- let uuid = primaryTestDB.getCollectionInfos()[0].info.uuid;
- let cmdFormatIndexNameA = "a_1";
- cmd = {
- applyOps: [{
- op: "c",
- ns: dbName + ".$cmd",
- ui: uuid,
- o: {
- applyOps: [{
- op: "c",
- ns: dbName + "." + collName,
- ui: uuid,
- o: {
- createIndexes: collName,
- v: 2,
- key: {a: 1},
- name: cmdFormatIndexNameA
- }
- }]
- }
- }]
- };
- res = primaryTestDB.runCommand(cmd);
+let ensureIndexExists = function(testDB, collName, indexName, expectedNumIndexes) {
+ let cmd = {listIndexes: collName};
+ let res = testDB.runCommand(cmd);
assert.commandWorked(res, "could not run " + tojson(cmd));
- rst.awaitReplication();
- ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameA, 2);
-
- rst.stopSet();
+ let indexes = testDB[collName].getIndexes();
+
+ assert.eq(indexes.length, expectedNumIndexes);
+
+ let foundIndex = indexes.some(index => index.name === indexName);
+ assert(foundIndex,
+ "did not find the index '" + indexName +
+ "' amongst the collection indexes: " + tojson(indexes));
+};
+
+let rst = new ReplSetTest({nodes: 3});
+rst.startSet();
+rst.initiate();
+
+let collName = "col";
+let dbName = "nested_apply_ops_create_indexes";
+
+let primaryTestDB = rst.getPrimary().getDB(dbName);
+let cmd = {"create": collName};
+let res = primaryTestDB.runCommand(cmd);
+assert.commandWorked(res, "could not run " + tojson(cmd));
+rst.awaitReplication();
+
+let uuid = primaryTestDB.getCollectionInfos()[0].info.uuid;
+let cmdFormatIndexNameA = "a_1";
+cmd = {
+ applyOps: [{
+ op: "c",
+ ns: dbName + ".$cmd",
+ ui: uuid,
+ o: {
+ applyOps: [{
+ op: "c",
+ ns: dbName + "." + collName,
+ ui: uuid,
+ o: {createIndexes: collName, v: 2, key: {a: 1}, name: cmdFormatIndexNameA}
+ }]
+ }
+ }]
+};
+res = primaryTestDB.runCommand(cmd);
+assert.commandWorked(res, "could not run " + tojson(cmd));
+rst.awaitReplication();
+ensureIndexExists(primaryTestDB, collName, cmdFormatIndexNameA, 2);
+
+rst.stopSet();
})();
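
For contrast with the nested form above, the single-level shape of the same command-style entry; a hedged sketch assuming primaryTestDB, dbName, collName, and uuid as set up in the test, with a hypothetical second index on {b: 1}:

assert.commandWorked(primaryTestDB.runCommand({
    applyOps: [{
        op: "c",
        ns: dbName + "." + collName,
        ui: uuid,
        o: {createIndexes: collName, v: 2, key: {b: 1}, name: "b_1"}
    }]
}));
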
diff --git a/jstests/replsets/no_disconnect_on_stepdown.js b/jstests/replsets/no_disconnect_on_stepdown.js
index 1d71788b3d6..b5a2368c293 100644
--- a/jstests/replsets/no_disconnect_on_stepdown.js
+++ b/jstests/replsets/no_disconnect_on_stepdown.js
@@ -2,106 +2,104 @@
* Tests that stepdown terminates writes, but does not disconnect connections.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/curop_helpers.js");
+load("jstests/libs/curop_helpers.js");
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const primaryAdmin = primary.getDB("admin");
- // We need a separate connection to avoid interference with the ReplSetTestMechanism.
- const primaryDataConn = new Mongo(primary.host);
- const primaryDb = primaryDataConn.getDB("test");
- const collname = "no_disconnect_on_stepdown";
- const coll = primaryDb[collname];
+const primary = rst.getPrimary();
+const primaryAdmin = primary.getDB("admin");
+// We need a separate connection to avoid interference with the ReplSetTestMechanism.
+const primaryDataConn = new Mongo(primary.host);
+const primaryDb = primaryDataConn.getDB("test");
+const collname = "no_disconnect_on_stepdown";
+const coll = primaryDb[collname];
- // Never retry on network error, because this test needs to detect the network error.
- TestData.skipRetryOnNetworkError = true;
+// Never retry on network error, because this test needs to detect the network error.
+TestData.skipRetryOnNetworkError = true;
- // Legacy writes will still disconnect, so don't use them.
- primaryDataConn.forceWriteMode('commands');
+// Legacy writes will still disconnect, so don't use them.
+primaryDataConn.forceWriteMode('commands');
- assert.commandWorked(coll.insert([
- {_id: 'update0', updateme: true},
- {_id: 'update1', updateme: true},
- {_id: 'remove0', removeme: true},
- {_id: 'remove1', removeme: true}
- ]));
- rst.awaitReplication();
+assert.commandWorked(coll.insert([
+ {_id: 'update0', updateme: true},
+ {_id: 'update1', updateme: true},
+ {_id: 'remove0', removeme: true},
+ {_id: 'remove1', removeme: true}
+]));
+rst.awaitReplication();
- jsTestLog("Stepping down with no command in progress. Should not disconnect.");
- // If the 'primary' connection is broken on stepdown, this command will fail.
- assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- // If the 'primaryDataConn' connection was broken during stepdown, this command will fail.
- assert.commandWorked(primaryDb.adminCommand({ping: 1}));
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
+jsTestLog("Stepping down with no command in progress. Should not disconnect.");
+// If the 'primary' connection is broken on stepdown, this command will fail.
+assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+// If the 'primaryDataConn' connection was broken during stepdown, this command will fail.
+assert.commandWorked(primaryDb.adminCommand({ping: 1}));
+// Allow the primary to be re-elected, and wait for it.
+assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+rst.getPrimary();
- function runStepDownTest({description, failpoint, operation, errorCode}) {
- jsTestLog(`Trying ${description} on a stepping-down primary`);
- assert.commandWorked(primaryAdmin.adminCommand({
- configureFailPoint: failpoint,
- mode: "alwaysOn",
- data: {shouldContinueOnInterrupt: true}
- }));
+function runStepDownTest({description, failpoint, operation, errorCode}) {
+ jsTestLog(`Trying ${description} on a stepping-down primary`);
+ assert.commandWorked(primaryAdmin.adminCommand({
+ configureFailPoint: failpoint,
+ mode: "alwaysOn",
+ data: {shouldContinueOnInterrupt: true}
+ }));
- errorCode = errorCode || ErrorCodes.InterruptedDueToReplStateChange;
- const writeCommand = `db.getMongo().forceWriteMode("commands");
+ errorCode = errorCode || ErrorCodes.InterruptedDueToReplStateChange;
+ const writeCommand = `db.getMongo().forceWriteMode("commands");
assert.commandFailedWithCode(${operation}, ${errorCode});
assert.commandWorked(db.adminCommand({ping:1}));`;
- const waitForShell = startParallelShell(writeCommand, primary.port);
- waitForCurOpByFilter(primaryAdmin, {"msg": failpoint});
- assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- assert.commandWorked(
- primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- try {
- waitForShell();
- } catch (ex) {
- print("Failed trying to write or ping in " + description + ", possibly disconnected.");
- throw ex;
- }
+ const waitForShell = startParallelShell(writeCommand, primary.port);
+ waitForCurOpByFilter(primaryAdmin, {"msg": failpoint});
+ assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+ assert.commandWorked(primaryAdmin.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+ try {
+ waitForShell();
+ } catch (ex) {
+ print("Failed trying to write or ping in " + description + ", possibly disconnected.");
+ throw ex;
+ }
- // Validate the number of operations killed on step down and the number of failed
- // unacknowledged writes that resulted in network disconnection.
- let replMetrics =
- assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
- assert.eq(replMetrics.stepDown.userOperationsKilled, 1);
- assert.eq(replMetrics.network.notMasterUnacknowledgedWrites, 0);
+ // Validate the number of operations killed on step down and the number of failed
+ // unacknowledged writes that resulted in network disconnection.
+ let replMetrics =
+ assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
+ assert.eq(replMetrics.stepDown.userOperationsKilled, 1);
+ assert.eq(replMetrics.network.notMasterUnacknowledgedWrites, 0);
- // Allow the primary to be re-elected, and wait for it.
- assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
- rst.getPrimary();
- }
+ // Allow the primary to be re-elected, and wait for it.
+ assert.commandWorked(primaryAdmin.adminCommand({replSetFreeze: 0}));
+ rst.getPrimary();
+}
- // Reduce the max batch size so the insert is reliably interrupted.
- assert.commandWorked(
- primaryAdmin.adminCommand({setParameter: 1, internalInsertMaxBatchSize: 2}));
- // Make updates and removes yield more often.
- assert.commandWorked(
- primaryAdmin.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 3}));
+// Reduce the max batch size so the insert is reliably interrupted.
+assert.commandWorked(primaryAdmin.adminCommand({setParameter: 1, internalInsertMaxBatchSize: 2}));
+// Make updates and removes yield more often.
+assert.commandWorked(
+ primaryAdmin.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 3}));
- runStepDownTest({
- description: "insert",
- failpoint: "hangWithLockDuringBatchInsert",
- operation: "db['" + collname + "'].insert([{_id:0}, {_id:1}, {_id:2}])"
- });
+runStepDownTest({
+ description: "insert",
+ failpoint: "hangWithLockDuringBatchInsert",
+ operation: "db['" + collname + "'].insert([{_id:0}, {_id:1}, {_id:2}])"
+});
- runStepDownTest({
- description: "update",
- failpoint: "hangWithLockDuringBatchUpdate",
- operation: "db['" + collname + "'].update({updateme: true}, {'$set': {x: 1}})"
- });
- runStepDownTest({
- description: "remove",
- failpoint: "hangWithLockDuringBatchRemove",
- operation: "db['" + collname + "'].remove({removeme: true})"
- });
- rst.stopSet();
+runStepDownTest({
+ description: "update",
+ failpoint: "hangWithLockDuringBatchUpdate",
+ operation: "db['" + collname + "'].update({updateme: true}, {'$set': {x: 1}})"
+});
+runStepDownTest({
+ description: "remove",
+ failpoint: "hangWithLockDuringBatchRemove",
+ operation: "db['" + collname + "'].remove({removeme: true})"
+});
+rst.stopSet();
})();
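
Each runStepDownTest call above follows the same choreography: park a write on a failpoint, confirm via currentOp that it is parked, step down, lift the failpoint, then join the shell. A minimal sketch of the park-and-join half for the insert case, assuming the failpoint is already set to alwaysOn as in the test:

const waitForShell = startParallelShell(`db.getMongo().forceWriteMode("commands");
    assert.commandFailedWithCode(db["${collname}"].insert([{_id: 0}, {_id: 1}, {_id: 2}]),
                                 ErrorCodes.InterruptedDueToReplStateChange);`,
                                        primary.port);
waitForCurOpByFilter(primaryAdmin, {"msg": "hangWithLockDuringBatchInsert"});
// ... replSetStepDown runs here, then the failpoint is turned off ...
waitForShell();
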
diff --git a/jstests/replsets/no_flapping_during_network_partition.js b/jstests/replsets/no_flapping_during_network_partition.js
index ce88d0bd298..e6f705c5a4a 100644
--- a/jstests/replsets/no_flapping_during_network_partition.js
+++ b/jstests/replsets/no_flapping_during_network_partition.js
@@ -11,45 +11,47 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
+load("jstests/libs/check_log.js");
- var name = "no_flapping_during_network_partition";
+var name = "no_flapping_during_network_partition";
- var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
- var nodes = replTest.startSet();
- var config = replTest.getReplSetConfig();
- config.members[0].priority = 5;
- config.members[2].arbiterOnly = true;
- config.settings = {electionTimeoutMillis: 2000};
- replTest.initiate(config);
+var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+var nodes = replTest.startSet();
+var config = replTest.getReplSetConfig();
+config.members[0].priority = 5;
+config.members[2].arbiterOnly = true;
+config.settings = {
+ electionTimeoutMillis: 2000
+};
+replTest.initiate(config);
- function getTerm(node) {
- return node.adminCommand({replSetGetStatus: 1}).term;
- }
+function getTerm(node) {
+ return node.adminCommand({replSetGetStatus: 1}).term;
+}
- replTest.waitForState(nodes[0], ReplSetTest.State.PRIMARY);
+replTest.waitForState(nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replTest.getPrimary();
- var secondary = replTest.getSecondary();
- var initialTerm = getTerm(primary);
+var primary = replTest.getPrimary();
+var secondary = replTest.getSecondary();
+var initialTerm = getTerm(primary);
- jsTestLog("Create a network partition between the primary and secondary.");
- primary.disconnect(secondary);
+jsTestLog("Create a network partition between the primary and secondary.");
+primary.disconnect(secondary);
- jsTestLog("Wait long enough for the secondary to call for an election.");
- checkLog.contains(secondary, "can see a healthy primary");
- checkLog.contains(secondary, "not running for primary");
+jsTestLog("Wait long enough for the secondary to call for an election.");
+checkLog.contains(secondary, "can see a healthy primary");
+checkLog.contains(secondary, "not running for primary");
- jsTestLog("Verify the primary and secondary do not change during the partition.");
- assert.eq(primary, replTest.getPrimary());
- assert.eq(secondary, replTest.getSecondary());
+jsTestLog("Verify the primary and secondary do not change during the partition.");
+assert.eq(primary, replTest.getPrimary());
+assert.eq(secondary, replTest.getSecondary());
- checkLog.contains(secondary, "not running for primary");
+checkLog.contains(secondary, "not running for primary");
- jsTestLog("Heal the partition.");
- primary.reconnect(secondary);
+jsTestLog("Heal the partition.");
+primary.reconnect(secondary);
- replTest.stopSet();
+replTest.stopSet();
})();
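
The disconnect and reconnect calls above are available because the set was built with useBridge: true, which routes every node-to-node connection through a proxy so individual links can be cut and healed. A minimal sketch of the primitive on a fresh two-node set:

const rt = new ReplSetTest({nodes: 2, useBridge: true});
rt.startSet();
rt.initiate();
const p = rt.getPrimary();
const s = rt.getSecondary();
p.disconnect(s);  // sever the link in both directions
// ... assert on behavior during the partition ...
p.reconnect(s);   // heal it
rt.stopSet();
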
diff --git a/jstests/replsets/noop_write_after_read_only_txn.js b/jstests/replsets/noop_write_after_read_only_txn.js
index a34db2d4a06..125eaf5b50f 100644
--- a/jstests/replsets/noop_write_after_read_only_txn.js
+++ b/jstests/replsets/noop_write_after_read_only_txn.js
@@ -4,88 +4,88 @@
//
// @tags: [uses_transactions]
(function() {
- "use strict";
- load('jstests/libs/write_concern_util.js');
+"use strict";
+load('jstests/libs/write_concern_util.js');
- const name = "noop_write_after_read_only_txn";
- const rst = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- });
- rst.startSet();
- rst.initiate();
+const name = "noop_write_after_read_only_txn";
+const rst = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const dbName = "test";
- const testDB = primary.getDB(dbName);
+const primary = rst.getPrimary();
+const dbName = "test";
+const testDB = primary.getDB(dbName);
- // Set up the collection.
- testDB.runCommand({drop: name, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.getCollection(name).insert({}, {writeConcern: {w: "majority"}}));
+// Set up the collection.
+testDB.runCommand({drop: name, writeConcern: {w: "majority"}});
+assert.commandWorked(testDB.getCollection(name).insert({}, {writeConcern: {w: "majority"}}));
- function runTest({readConcernLevel, shouldWrite, provokeWriteConcernError}) {
- jsTestLog(
- `Read concern level "${readConcernLevel}", shouldWrite: ${shouldWrite}, provokeWriteConcernError: ${provokeWriteConcernError}`);
+function runTest({readConcernLevel, shouldWrite, provokeWriteConcernError}) {
+ jsTestLog(`Read concern level "${readConcernLevel}", shouldWrite: ${
+ shouldWrite}, provokeWriteConcernError: ${provokeWriteConcernError}`);
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const txnOptions = {writeConcern: {w: "majority"}};
- if (readConcernLevel)
- txnOptions.readConcern = {level: readConcernLevel};
+ const session = primary.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const txnOptions = {writeConcern: {w: "majority"}};
+ if (readConcernLevel)
+ txnOptions.readConcern = {level: readConcernLevel};
- if (provokeWriteConcernError)
- txnOptions.writeConcern.wtimeout = 1000;
+ if (provokeWriteConcernError)
+ txnOptions.writeConcern.wtimeout = 1000;
- session.startTransaction(txnOptions);
- assert.commandWorked(sessionDB.runCommand({find: name}));
- if (shouldWrite)
- assert.commandWorked(sessionDB.getCollection(name).insert({}));
+ session.startTransaction(txnOptions);
+ assert.commandWorked(sessionDB.runCommand({find: name}));
+ if (shouldWrite)
+ assert.commandWorked(sessionDB.getCollection(name).insert({}));
- if (provokeWriteConcernError)
- stopReplicationOnSecondaries(rst);
+ if (provokeWriteConcernError)
+ stopReplicationOnSecondaries(rst);
- const commitResult =
- assert.commandWorkedIgnoringWriteConcernErrors(session.commitTransaction_forTesting());
+ const commitResult =
+ assert.commandWorkedIgnoringWriteConcernErrors(session.commitTransaction_forTesting());
- jsTestLog(`commitResult ${tojson(commitResult)}`);
- if (provokeWriteConcernError) {
- assertWriteConcernError(commitResult);
- } else {
- assert.commandWorked(commitResult);
- }
+ jsTestLog(`commitResult ${tojson(commitResult)}`);
+ if (provokeWriteConcernError) {
+ assertWriteConcernError(commitResult);
+ } else {
+ assert.commandWorked(commitResult);
+ }
- const entries = rst.findOplog(primary,
- {
- op: "n",
- ts: {$gte: commitResult.operationTime},
- "o.msg": /.*read-only transaction.*/
- },
- 1)
- .toArray();
+ const entries = rst.findOplog(primary,
+ {
+ op: "n",
+ ts: {$gte: commitResult.operationTime},
+ "o.msg": /.*read-only transaction.*/
+ },
+ 1)
+ .toArray();
- // If the transaction had a write, it should not *also* do a noop.
- if (shouldWrite) {
- assert.eq(0, entries.length, "shouldn't have written noop oplog entry");
- } else {
- assert.eq(1, entries.length, "should have written noop oplog entry");
- }
-
- jsTestLog("Ending session");
- session.endSession();
- restartReplSetReplication(rst);
+ // If the transaction had a write, it should not *also* do a noop.
+ if (shouldWrite) {
+ assert.eq(0, entries.length, "shouldn't have written noop oplog entry");
+ } else {
+ assert.eq(1, entries.length, "should have written noop oplog entry");
}
- for (let readConcernLevel of[null, "local", "majority", "snapshot"]) {
- for (let shouldWrite of[false, true]) {
- for (let provokeWriteConcernError of[false, true]) {
- runTest({
- readConcernLevel: readConcernLevel,
- shouldWrite: shouldWrite,
- provokeWriteConcernError: provokeWriteConcernError
- });
- }
+ jsTestLog("Ending session");
+ session.endSession();
+ restartReplSetReplication(rst);
+}
+
+for (let readConcernLevel of [null, "local", "majority", "snapshot"]) {
+ for (let shouldWrite of [false, true]) {
+ for (let provokeWriteConcernError of [false, true]) {
+ runTest({
+ readConcernLevel: readConcernLevel,
+ shouldWrite: shouldWrite,
+ provokeWriteConcernError: provokeWriteConcernError
+ });
}
}
+}
- rst.stopSet();
+rst.stopSet();
}());
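
rst.findOplog, used above, is a ReplSetTest helper that queries local.oplog.rs on the given node with a filter and limit and returns a cursor. A minimal sketch of the same noop probe, where the o.msg regex is the stable part of the match:

const noops =
    rst.findOplog(primary, {op: "n", "o.msg": /.*read-only transaction.*/}, 1).toArray();
assert.eq(1, noops.length, "expected exactly one noop oplog entry: " + tojson(noops));
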
diff --git a/jstests/replsets/noop_writes_wait_for_write_concern.js b/jstests/replsets/noop_writes_wait_for_write_concern.js
index aeccce86117..d5731c2b7c4 100644
--- a/jstests/replsets/noop_writes_wait_for_write_concern.js
+++ b/jstests/replsets/noop_writes_wait_for_write_concern.js
@@ -6,232 +6,232 @@
*/
(function() {
- "use strict";
- load('jstests/libs/write_concern_util.js');
-
- var name = 'noop_writes_wait_for_write_concern';
- var replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- });
- replTest.startSet();
- replTest.initiate();
- // Stops node 1 so that all w:3 write concerns time out. We have 3 data bearing nodes so that
- // 'dropDatabase' can satisfy its implicit writeConcern: majority but still time out from the
- // explicit w:3 write concern.
- replTest.stop(1);
-
- var primary = replTest.getPrimary();
- assert.eq(primary, replTest.nodes[0]);
- var dbName = 'testDB';
- var db = primary.getDB(dbName);
- var collName = 'testColl';
- var coll = db[collName];
-
- function dropTestCollection() {
- coll.drop();
- assert.eq(0, coll.find().itcount(), "test collection not empty");
+"use strict";
+load('jstests/libs/write_concern_util.js');
+
+var name = 'noop_writes_wait_for_write_concern';
+var replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+});
+replTest.startSet();
+replTest.initiate();
+// Stops node 1 so that all w:3 write concerns time out. We have 3 data bearing nodes so that
+// 'dropDatabase' can satisfy its implicit writeConcern: majority but still time out from the
+// explicit w:3 write concern.
+replTest.stop(1);
+
+var primary = replTest.getPrimary();
+assert.eq(primary, replTest.nodes[0]);
+var dbName = 'testDB';
+var db = primary.getDB(dbName);
+var collName = 'testColl';
+var coll = db[collName];
+
+function dropTestCollection() {
+ coll.drop();
+ assert.eq(0, coll.find().itcount(), "test collection not empty");
+}
+
+// Each entry in this array contains a command whose noop write concern behavior needs to be
+// tested. Entries have the following structure:
+// {
+// req: <object>, // Command request object that will result in a noop
+// // write after the setup function is called.
+//
+// setupFunc: <function()>, // Function to run to ensure that the request is a
+// // noop.
+//
+// confirmFunc: <function(res)>, // Function to run after the command is run to ensure
+// // that it executed properly. Accepts the result of
+// // the noop request to validate it.
+// }
+var commands = [];
+
+commands.push({
+ req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1}}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({_id: 1}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.applied, 1);
+ assert.eq(res.results[0], true);
+ assert.eq(coll.find().itcount(), 1);
+ assert.eq(coll.count({_id: 1}), 1);
}
-
- // Each entry in this array contains a command whose noop write concern behavior needs to be
- // tested. Entries have the following structure:
- // {
- // req: <object>, // Command request object that will result in a noop
- // // write after the setup function is called.
- //
- // setupFunc: <function()>, // Function to run to ensure that the request is a
- // // noop.
- //
- // confirmFunc: <function(res)>, // Function to run after the command is run to ensure
- // // that it executed properly. Accepts the result of
- // // the noop request to validate it.
- // }
- var commands = [];
-
- commands.push({
- req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1}}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({_id: 1}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.applied, 1);
- assert.eq(res.results[0], true);
- assert.eq(coll.find().itcount(), 1);
- assert.eq(coll.count({_id: 1}), 1);
- }
- });
-
- // 'update' where the document to update does not exist.
- commands.push({
- req: {update: collName, updates: [{q: {a: 1}, u: {b: 2}}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.update({a: 1}, {b: 2}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.n, 0);
- assert.eq(res.nModified, 0);
- assert.eq(coll.find().itcount(), 1);
- assert.eq(coll.count({b: 2}), 1);
- }
- });
-
- // 'update' where the update has already been done.
- commands.push({
- req: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 2}}}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.update({a: 1}, {$set: {b: 2}}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.n, 1);
- assert.eq(res.nModified, 0);
- assert.eq(coll.find().itcount(), 1);
- assert.eq(coll.count({a: 1, b: 2}), 1);
- }
- });
-
- commands.push({
- req: {delete: collName, deletes: [{q: {a: 1}, limit: 1}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.writeOK(coll.remove({a: 1}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.n, 0);
- assert.eq(coll.count({a: 1}), 0);
- }
- });
-
- commands.push({
- req: {createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.commandWorkedIgnoringWriteConcernErrors(
- db.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.numIndexesBefore, res.numIndexesAfter);
- assert.eq(res.note, 'all indexes already exist');
- }
- });
-
- // 'findAndModify' where the document to update does not exist.
- commands.push({
- req: {findAndModify: collName, query: {a: 1}, update: {b: 2}},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.commandWorkedIgnoringWriteConcernErrors(
- db.runCommand({findAndModify: collName, query: {a: 1}, update: {b: 2}}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.lastErrorObject.updatedExisting, false);
- assert.eq(coll.find().itcount(), 1);
- assert.eq(coll.count({b: 2}), 1);
- }
- });
-
- // 'findAndModify' where the update has already been done.
- commands.push({
- req: {findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.commandWorkedIgnoringWriteConcernErrors(
- db.runCommand({findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assert.eq(res.lastErrorObject.updatedExisting, true);
- assert.eq(coll.find().itcount(), 1);
- assert.eq(coll.count({a: 1, b: 2}), 1);
- }
- });
-
- commands.push({
- req: {dropDatabase: 1},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({dropDatabase: 1}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- }
- });
-
- commands.push({
- req: {drop: collName},
- setupFunc: function() {
- assert.writeOK(coll.insert({a: 1}));
- assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({drop: collName}));
- },
- confirmFunc: function(res) {
- assert.commandFailedWithCode(res, ErrorCodes.NamespaceNotFound);
- }
- });
-
- commands.push({
- req: {create: collName},
- setupFunc: function() {
- assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({create: collName}));
- },
- confirmFunc: function(res) {
- assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists);
- }
- });
-
- commands.push({
- req: {insert: collName, documents: [{_id: 1}]},
- setupFunc: function() {
- assert.writeOK(coll.insert({_id: 1}));
- },
- confirmFunc: function(res) {
- assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res);
- assert.eq(res.n, 0);
- assert.eq(res.writeErrors[0].code, ErrorCodes.DuplicateKey);
- assert.eq(coll.count({_id: 1}), 1);
- }
- });
-
- function testCommandWithWriteConcern(cmd) {
- // Provide a small wtimeout that we expect to time out.
- cmd.req.writeConcern = {w: 3, wtimeout: 1000};
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropTestCollection();
-
- cmd.setupFunc();
-
- // We run the command on a different connection. If the command were run on the
- // same connection, then the client last op for the noop write would be set by the setup
- // operation. By using a fresh connection the client last op begins as null.
- // This test explicitly tests that write concern for noop writes works when the
- // client last op has not already been set by a duplicate operation.
- var shell2 = new Mongo(primary.host);
-
- // We check the error code of 'res' in the 'confirmFunc'.
- var res = shell2.getDB(dbName).runCommand(cmd.req);
-
- try {
- // Tests that the command receives a write concern error. If we don't wait for write
- // concern on noop writes then we won't get a write concern error.
- assertWriteConcernError(res);
- cmd.confirmFunc(res);
- } catch (e) {
- // Make sure that we print out the response.
- printjson(res);
- throw e;
- }
+});
+
+// 'update' where the document to update does not exist.
+commands.push({
+ req: {update: collName, updates: [{q: {a: 1}, u: {b: 2}}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.writeOK(coll.update({a: 1}, {b: 2}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.n, 0);
+ assert.eq(res.nModified, 0);
+ assert.eq(coll.find().itcount(), 1);
+ assert.eq(coll.count({b: 2}), 1);
+ }
+});
+
+// 'update' where the update has already been done.
+commands.push({
+ req: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 2}}}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.writeOK(coll.update({a: 1}, {$set: {b: 2}}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.n, 1);
+ assert.eq(res.nModified, 0);
+ assert.eq(coll.find().itcount(), 1);
+ assert.eq(coll.count({a: 1, b: 2}), 1);
+ }
+});
+
+commands.push({
+ req: {delete: collName, deletes: [{q: {a: 1}, limit: 1}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.writeOK(coll.remove({a: 1}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.n, 0);
+ assert.eq(coll.count({a: 1}), 0);
+ }
+});
+
+commands.push({
+ req: {createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorkedIgnoringWriteConcernErrors(
+ db.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.numIndexesBefore, res.numIndexesAfter);
+ assert.eq(res.note, 'all indexes already exist');
+ }
+});
+
+// 'findAndModify' where the document to update does not exist.
+commands.push({
+ req: {findAndModify: collName, query: {a: 1}, update: {b: 2}},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorkedIgnoringWriteConcernErrors(
+ db.runCommand({findAndModify: collName, query: {a: 1}, update: {b: 2}}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.lastErrorObject.updatedExisting, false);
+ assert.eq(coll.find().itcount(), 1);
+ assert.eq(coll.count({b: 2}), 1);
+ }
+});
+
+// 'findAndModify' where the update has already been done.
+commands.push({
+ req: {findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorkedIgnoringWriteConcernErrors(
+ db.runCommand({findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assert.eq(res.lastErrorObject.updatedExisting, true);
+ assert.eq(coll.find().itcount(), 1);
+ assert.eq(coll.count({a: 1, b: 2}), 1);
+ }
+});
+
+commands.push({
+ req: {dropDatabase: 1},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({dropDatabase: 1}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ }
+});
+
+commands.push({
+ req: {drop: collName},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({a: 1}));
+ assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({drop: collName}));
+ },
+ confirmFunc: function(res) {
+ assert.commandFailedWithCode(res, ErrorCodes.NamespaceNotFound);
+ }
+});
+
+commands.push({
+ req: {create: collName},
+ setupFunc: function() {
+ assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({create: collName}));
+ },
+ confirmFunc: function(res) {
+ assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists);
+ }
+});
+
+commands.push({
+ req: {insert: collName, documents: [{_id: 1}]},
+ setupFunc: function() {
+ assert.writeOK(coll.insert({_id: 1}));
+ },
+ confirmFunc: function(res) {
+ assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res);
+ assert.eq(res.n, 0);
+ assert.eq(res.writeErrors[0].code, ErrorCodes.DuplicateKey);
+ assert.eq(coll.count({_id: 1}), 1);
+ }
+});
+
+function testCommandWithWriteConcern(cmd) {
+ // Provide a small wtimeout that we expect to time out.
+ cmd.req.writeConcern = {w: 3, wtimeout: 1000};
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ dropTestCollection();
+
+ cmd.setupFunc();
+
+    // We run the command on a different connection. If the command were run on the
+ // same connection, then the client last op for the noop write would be set by the setup
+ // operation. By using a fresh connection the client last op begins as null.
+ // This test explicitly tests that write concern for noop writes works when the
+ // client last op has not already been set by a duplicate operation.
+ var shell2 = new Mongo(primary.host);
+
+ // We check the error code of 'res' in the 'confirmFunc'.
+ var res = shell2.getDB(dbName).runCommand(cmd.req);
+
+ try {
+ // Tests that the command receives a write concern error. If we don't wait for write
+ // concern on noop writes then we won't get a write concern error.
+ assertWriteConcernError(res);
+ cmd.confirmFunc(res);
+ } catch (e) {
+ // Make sure that we print out the response.
+ printjson(res);
+ throw e;
}
+}
- commands.forEach(function(cmd) {
- testCommandWithWriteConcern(cmd);
- });
+commands.forEach(function(cmd) {
+ testCommandWithWriteConcern(cmd);
+});
- replTest.stopSet();
+replTest.stopSet();
})();
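
Condensed, the pattern in this file is: arrange state so the request is a no-op, run it on a fresh connection with an unsatisfiable write concern, and require a write concern error. A minimal sketch of that skeleton, assuming a "primary" connection and a topology where {w: 3} cannot currently be satisfied (the database and collection names here are illustrative, not from the patch):

    // Sketch: a no-op write must still wait for its write concern.
    load("jstests/libs/write_concern_util.js");  // For assertWriteConcernError().

    var freshConn = new Mongo(primary.host);  // Fresh connection: client last op starts null.
    var res = freshConn.getDB("test").runCommand({
        insert: "coll",
        documents: [{_id: 1}],  // The document already exists, so the write is a no-op.
        writeConcern: {w: 3, wtimeout: 1000}
    });
    assertWriteConcernError(res);  // The no-op still reports a wtimeout error.
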
diff --git a/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js b/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js
index 199999574f2..e024a9853f7 100644
--- a/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js
+++ b/jstests/replsets/noop_writes_wait_for_write_concern_fcv.js
@@ -2,71 +2,71 @@
* Tests that a no-op setFeatureCompatibilityVersion request still waits for write concern.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // assertWriteConcernError
- load("jstests/replsets/rslib.js"); // reconfig
- load("jstests/libs/feature_compatibility_version.js"); // latestFCV/lastStableFCV
+load("jstests/libs/write_concern_util.js"); // assertWriteConcernError
+load("jstests/replsets/rslib.js"); // reconfig
+load("jstests/libs/feature_compatibility_version.js"); // latestFCV/lastStableFCV
-    // Start a two-node replica set and set its FCV to the given version, then take down one
- // node so majority write concern can no longer be satisfied and verify that a noop setFCV
- // request times out waiting for majority write concern.
- function testFCVNoop(targetVersion) {
- jsTestLog("Testing setFeatureCompatibilityVersion with targetVersion: " + targetVersion);
+// Start a two-node replica set and set its FCV to the given version, then take down one
+// node so majority write concern can no longer be satisfied and verify that a noop setFCV
+// request times out waiting for majority write concern.
+function testFCVNoop(targetVersion) {
+ jsTestLog("Testing setFeatureCompatibilityVersion with targetVersion: " + targetVersion);
- const replTest = new ReplSetTest({
- nodes: [{}, {rsConfig: {priority: 0}}],
- });
- replTest.startSet();
- replTest.initiate();
+ const replTest = new ReplSetTest({
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ });
+ replTest.startSet();
+ replTest.initiate();
- const primary = replTest.getPrimary();
- assert.eq(primary, replTest.nodes[0]);
+ const primary = replTest.getPrimary();
+ assert.eq(primary, replTest.nodes[0]);
- // Set the FCV to the given target version, to ensure calling setFCV below is a no-op.
- assert.commandWorkedIgnoringWriteConcernErrors(
- primary.adminCommand({setFeatureCompatibilityVersion: targetVersion}));
+ // Set the FCV to the given target version, to ensure calling setFCV below is a no-op.
+ assert.commandWorkedIgnoringWriteConcernErrors(
+ primary.adminCommand({setFeatureCompatibilityVersion: targetVersion}));
- // Stop one node to force commands with "majority" write concern to time out. First increase
- // the election timeout to prevent the primary from stepping down before the test is over.
- let conf = replTest.getReplSetConfigFromNode();
- conf.settings = {
- electionTimeoutMillis: 1000 * 60 * 10,
- };
- conf.version += 1;
- reconfig(replTest, conf);
+ // Stop one node to force commands with "majority" write concern to time out. First increase
+ // the election timeout to prevent the primary from stepping down before the test is over.
+ let conf = replTest.getReplSetConfigFromNode();
+ conf.settings = {
+ electionTimeoutMillis: 1000 * 60 * 10,
+ };
+ conf.version += 1;
+ reconfig(replTest, conf);
- replTest.stop(1);
+ replTest.stop(1);
-                // Oplog has been truncated if the number of insert oplog entries is less
-                // than the number inserted.
+ // Insert a document to ensure there is a last optime.
+ assert.writeOK(primary.getDB("test").foo.insert({x: 1}));
-        // We run the command on a different connection. If the command were run on the same
- // connection, then the client last op for the noop write would be the last op of the
- // previous setFCV call. By using a fresh connection the client last op begins as null. This
- // test explicitly tests that write concern for noop writes works when the client last op
- // has not already been set by a duplicate operation.
- const shell2 = new Mongo(primary.host);
+    // We run the command on a different connection. If the command were run on the same
+ // connection, then the client last op for the noop write would be the last op of the
+ // previous setFCV call. By using a fresh connection the client last op begins as null. This
+ // test explicitly tests that write concern for noop writes works when the client last op
+ // has not already been set by a duplicate operation.
+ const shell2 = new Mongo(primary.host);
- // Use w:1 to verify setFCV internally waits for at least write concern majority, and use a
- // small wtimeout to verify it is propagated into the internal waitForWriteConcern and will
-        // allow the command to time out.
- const res = shell2.adminCommand(
- {setFeatureCompatibilityVersion: targetVersion, writeConcern: {w: 1, wtimeout: 1000}});
+ // Use w:1 to verify setFCV internally waits for at least write concern majority, and use a
+ // small wtimeout to verify it is propagated into the internal waitForWriteConcern and will
+    // allow the command to time out.
+ const res = shell2.adminCommand(
+ {setFeatureCompatibilityVersion: targetVersion, writeConcern: {w: 1, wtimeout: 1000}});
- try {
- // Verify the command receives a write concern error. If we don't wait for write concern
- // on noop writes then we won't get a write concern error.
- assertWriteConcernError(res);
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- } catch (e) {
- printjson(res);
- throw e;
- }
-
- replTest.stopSet();
+ try {
+ // Verify the command receives a write concern error. If we don't wait for write concern
+ // on noop writes then we won't get a write concern error.
+ assertWriteConcernError(res);
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ } catch (e) {
+ printjson(res);
+ throw e;
}
- testFCVNoop(lastStableFCV);
- testFCVNoop(latestFCV);
+ replTest.stopSet();
+}
+
+testFCVNoop(lastStableFCV);
+testFCVNoop(latestFCV);
})();
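
One detail worth isolating from this hunk is the reconfig that keeps the primary from stepping down while the majority is unsatisfiable. As a standalone sketch, assuming replTest and the reconfig helper from jstests/replsets/rslib.js as in the test:

    // Raise the election timeout so the primary stays primary while a secondary
    // is down; otherwise the wtimeout assertion could race a stepdown.
    let conf = replTest.getReplSetConfigFromNode();
    conf.settings = {electionTimeoutMillis: 10 * 60 * 1000};  // Ten minutes.
    conf.version += 1;  // Every reconfig must bump the config version.
    reconfig(replTest, conf);
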
diff --git a/jstests/replsets/not_master_unacknowledged_write.js b/jstests/replsets/not_master_unacknowledged_write.js
index f214a45c4d4..ac7b4cf2ef5 100644
--- a/jstests/replsets/not_master_unacknowledged_write.js
+++ b/jstests/replsets/not_master_unacknowledged_write.js
@@ -3,81 +3,82 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
-
- function getNotMasterUnackWritesCounter() {
- return assert.commandWorked(primaryDB.adminCommand({serverStatus: 1}))
- .metrics.repl.network.notMasterUnacknowledgedWrites;
- }
-
- const collName = "not_master_unacknowledged_write";
-
- var rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
- var primary = rst.getPrimary();
- var secondary = rst.getSecondary();
- var primaryDB = primary.getDB("test");
- var secondaryDB = secondary.getDB("test");
- var primaryColl = primaryDB[collName];
- var secondaryColl = secondaryDB[collName];
-
- jsTestLog("Primary on port " + primary.port + " hangs up on unacknowledged writes");
- // Do each write method with unacknowledged write concern, "wc".
- [{name: "insertOne", fn: (wc) => secondaryColl.insertOne({}, wc)},
- {name: "insertMany", fn: (wc) => secondaryColl.insertMany([{}], wc)},
- {name: "deleteOne", fn: (wc) => secondaryColl.deleteOne({}, wc)},
- {name: "deleteMany", fn: (wc) => secondaryColl.deleteMany({}, wc)},
- {name: "updateOne", fn: (wc) => secondaryColl.updateOne({}, {$set: {x: 1}}, wc)},
- {name: "updateMany", fn: (wc) => secondaryColl.updateMany({}, {$set: {x: 1}}, wc)},
- {name: "replaceOne", fn: (wc) => secondaryColl.replaceOne({}, {}, wc)},
- ].map(({name, fn}) => {
- var result = assert.throws(function() {
- // Provoke the server to hang up.
- fn({writeConcern: {w: 0}});
- // The connection is now broken and isMaster throws a network error.
- secondary.getDB("admin").isMaster();
- }, [], "network error from " + name);
-
- assert.includes(result.toString(),
- "network error while attempting to run command 'isMaster'",
- "after " + name);
- });
-
- // Unacknowledged write in progress when a stepdown occurs provokes a hangup.
- assert.commandWorked(primaryDB.adminCommand({
- configureFailPoint: "hangAfterCollectionInserts",
- mode: "alwaysOn",
- data: {collectionNS: primaryColl.getFullName()}
- }));
-
- var command = `
+"use strict";
+
+load("jstests/libs/check_log.js");
+
+function getNotMasterUnackWritesCounter() {
+ return assert.commandWorked(primaryDB.adminCommand({serverStatus: 1}))
+ .metrics.repl.network.notMasterUnacknowledgedWrites;
+}
+
+const collName = "not_master_unacknowledged_write";
+
+var rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+var primary = rst.getPrimary();
+var secondary = rst.getSecondary();
+var primaryDB = primary.getDB("test");
+var secondaryDB = secondary.getDB("test");
+var primaryColl = primaryDB[collName];
+var secondaryColl = secondaryDB[collName];
+
+jsTestLog("Primary on port " + primary.port + " hangs up on unacknowledged writes");
+// Do each write method with unacknowledged write concern, "wc".
+[{name: "insertOne", fn: (wc) => secondaryColl.insertOne({}, wc)},
+ {name: "insertMany", fn: (wc) => secondaryColl.insertMany([{}], wc)},
+ {name: "deleteOne", fn: (wc) => secondaryColl.deleteOne({}, wc)},
+ {name: "deleteMany", fn: (wc) => secondaryColl.deleteMany({}, wc)},
+ {name: "updateOne", fn: (wc) => secondaryColl.updateOne({}, {$set: {x: 1}}, wc)},
+ {name: "updateMany", fn: (wc) => secondaryColl.updateMany({}, {$set: {x: 1}}, wc)},
+ {name: "replaceOne", fn: (wc) => secondaryColl.replaceOne({}, {}, wc)},
+].map(({name, fn}) => {
+ var result = assert.throws(function() {
+ // Provoke the server to hang up.
+ fn({writeConcern: {w: 0}});
+ // The connection is now broken and isMaster throws a network error.
+ secondary.getDB("admin").isMaster();
+ }, [], "network error from " + name);
+
+ assert.includes(result.toString(),
+ "network error while attempting to run command 'isMaster'",
+ "after " + name);
+});
+
+// Unacknowledged write in progress when a stepdown occurs provokes a hangup.
+assert.commandWorked(primaryDB.adminCommand({
+ configureFailPoint: "hangAfterCollectionInserts",
+ mode: "alwaysOn",
+ data: {collectionNS: primaryColl.getFullName()}
+}));
+
+var command =
+ `
load("jstests/libs/check_log.js");
checkLog.contains(db.getMongo(), "hangAfterCollectionInserts fail point enabled");
db.adminCommand({replSetStepDown: 60, force: true});`;
- var awaitShell = startParallelShell(command, primary.port);
+var awaitShell = startParallelShell(command, primary.port);
- let failedUnackWritesBefore = getNotMasterUnackWritesCounter();
+let failedUnackWritesBefore = getNotMasterUnackWritesCounter();
- jsTestLog("Beginning unacknowledged insert");
- primaryColl.insertOne({}, {writeConcern: {w: 0}});
+jsTestLog("Beginning unacknowledged insert");
+primaryColl.insertOne({}, {writeConcern: {w: 0}});
- jsTestLog("Step down primary on port " + primary.port);
- awaitShell({checkExitSuccess: false});
+jsTestLog("Step down primary on port " + primary.port);
+awaitShell({checkExitSuccess: false});
- jsTestLog("Unacknowledged insert during stepdown provoked disconnect");
- var result = assert.throws(function() {
- primary.getDB("admin").isMaster();
- }, [], "network");
- assert.includes(result.toString(), "network error while attempting to run command 'isMaster'");
+jsTestLog("Unacknowledged insert during stepdown provoked disconnect");
+var result = assert.throws(function() {
+ primary.getDB("admin").isMaster();
+}, [], "network");
+assert.includes(result.toString(), "network error while attempting to run command 'isMaster'");
-    // Validate that the unacknowledged write that failed due to the step down was counted
-    // as a not-master network disconnection.
- let failedUnackWritesAfter = getNotMasterUnackWritesCounter();
- assert.eq(failedUnackWritesAfter, failedUnackWritesBefore + 1);
+// Validate that the unacknowledged write that failed due to the step down was counted
+// as a not-master network disconnection.
+let failedUnackWritesAfter = getNotMasterUnackWritesCounter();
+assert.eq(failedUnackWritesAfter, failedUnackWritesBefore + 1);
- rst.stopSet();
+rst.stopSet();
})();
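
For readers tracing the metric this test asserts on, a small sketch of a helper that reads it on an arbitrary connection (the serverStatus path is the one used above):

    // Sketch: read the not-master unacknowledged-write counter from serverStatus.
    function notMasterUnackWrites(conn) {
        return assert.commandWorked(conn.adminCommand({serverStatus: 1}))
            .metrics.repl.network.notMasterUnacknowledgedWrites;
    }
    // The test requires this counter to grow by exactly one after the
    // unacknowledged insert is interrupted by the stepdown.
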
diff --git a/jstests/replsets/opcounters_repl.js b/jstests/replsets/opcounters_repl.js
index af45d96ae03..5bf31a1f5ee 100644
--- a/jstests/replsets/opcounters_repl.js
+++ b/jstests/replsets/opcounters_repl.js
@@ -6,97 +6,96 @@
*/
(function() {
- "use strict";
-
- const testName = "opcounters_repl";
- const dbName = testName;
- const rst = new ReplSetTest({name: testName, nodes: 2});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const secondary = rst.getSecondary();
-
- const collName = "coll";
- const collNs = dbName + '.' + collName;
- const primaryColl = primaryDB[collName];
-
- function getOpCounters(node) {
- return assert.commandWorked(node.adminCommand({serverStatus: 1})).opcounters;
+"use strict";
+
+const testName = "opcounters_repl";
+const dbName = testName;
+const rst = new ReplSetTest({name: testName, nodes: 2});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const secondary = rst.getSecondary();
+
+const collName = "coll";
+const collNs = dbName + '.' + collName;
+const primaryColl = primaryDB[collName];
+
+function getOpCounters(node) {
+ return assert.commandWorked(node.adminCommand({serverStatus: 1})).opcounters;
+}
+
+function getOpCountersRepl(node) {
+ return assert.commandWorked(node.adminCommand({serverStatus: 1})).opcountersRepl;
+}
+
+function getOpCountersDiff(cmdFn) {
+ // Get the counters before running cmdFn().
+ const primaryOpCountersBefore = getOpCounters(primary);
+ const secondaryOpCountersReplBefore = getOpCountersRepl(secondary);
+
+ // Run the cmd.
+ cmdFn();
+
+ // Get the counters after running cmdFn().
+ const primaryOpCountersAfter = getOpCounters(primary);
+ const secondaryOpCountersReplAfter = getOpCountersRepl(secondary);
+
+ // Calculate the diff
+ let primaryDiff = {};
+ let secondaryDiff = {};
+ for (let key in primaryOpCountersBefore) {
+ primaryDiff[key] = primaryOpCountersAfter[key] - primaryOpCountersBefore[key];
}
- function getOpCountersRepl(node) {
- return assert.commandWorked(node.adminCommand({serverStatus: 1})).opcountersRepl;
+ for (let key in secondaryOpCountersReplBefore) {
+ secondaryDiff[key] = secondaryOpCountersReplAfter[key] - secondaryOpCountersReplBefore[key];
}
-
- function getOpCountersDiff(cmdFn) {
- // Get the counters before running cmdFn().
- const primaryOpCountersBefore = getOpCounters(primary);
- const secondaryOpCountersReplBefore = getOpCountersRepl(secondary);
-
- // Run the cmd.
- cmdFn();
-
- // Get the counters after running cmdFn().
- const primaryOpCountersAfter = getOpCounters(primary);
- const secondaryOpCountersReplAfter = getOpCountersRepl(secondary);
-
- // Calculate the diff
- let primaryDiff = {};
- let secondaryDiff = {};
- for (let key in primaryOpCountersBefore) {
- primaryDiff[key] = primaryOpCountersAfter[key] - primaryOpCountersBefore[key];
- }
-
- for (let key in secondaryOpCountersReplBefore) {
- secondaryDiff[key] =
- secondaryOpCountersReplAfter[key] - secondaryOpCountersReplBefore[key];
- }
- return {primary: primaryDiff, secondary: secondaryDiff};
- }
-
- // 1. Create collection.
- let diff = getOpCountersDiff(() => {
- assert.commandWorked(primaryDB.createCollection(collName, {writeConcern: {w: 2}}));
- });
-    // On primary, the command counter accounts for the create command and for other internal
-    // commands like replSetUpdatePosition, replSetHeartbeat, serverStatus, etc.
- assert.gte(diff.primary.command, 1);
- assert.eq(diff.secondary.command, 1);
-
- // 2. Insert a document.
- diff = getOpCountersDiff(() => {
- assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 2}}));
- });
- assert.eq(diff.primary.insert, 1);
- assert.eq(diff.secondary.insert, 1);
-
- // 3. Update a document.
- diff = getOpCountersDiff(() => {
- assert.writeOK(primaryColl.update({_id: 0}, {$set: {a: 1}}, {writeConcern: {w: 2}}));
- });
- assert.eq(diff.primary.update, 1);
- assert.eq(diff.secondary.update, 1);
-
- // 4. Delete a document.
- diff = getOpCountersDiff(() => {
- assert.writeOK(primaryColl.remove({_id: 0}, {writeConcern: {w: 2}}));
- });
- assert.eq(diff.primary.delete, 1);
- assert.eq(diff.secondary.delete, 1);
-
- // 5. Atomic insert operation via applyOps cmd.
- diff = getOpCountersDiff(() => {
- assert.commandWorked(primaryColl.runCommand(
- {applyOps: [{op: "i", ns: collNs, o: {_id: 1}}], writeConcern: {w: 2}}));
- });
-    // On primary, the command counter accounts for the applyOps command and for other internal
-    // commands like replSetUpdatePosition, replSetHeartbeat, serverStatus, etc.
- assert.gte(diff.primary.command, 1);
- assert.eq(diff.secondary.command, 0);
- assert.eq(diff.primary.insert, 1);
- assert.eq(diff.secondary.insert, 1);
-
- rst.stopSet();
+ return {primary: primaryDiff, secondary: secondaryDiff};
+}
+
+// 1. Create collection.
+let diff = getOpCountersDiff(() => {
+ assert.commandWorked(primaryDB.createCollection(collName, {writeConcern: {w: 2}}));
+});
+// On primary, the command counter accounts for the create command and for other internal
+// commands like replSetUpdatePosition, replSetHeartbeat, serverStatus, etc.
+assert.gte(diff.primary.command, 1);
+assert.eq(diff.secondary.command, 1);
+
+// 2. Insert a document.
+diff = getOpCountersDiff(() => {
+ assert.writeOK(primaryColl.insert({_id: 0}, {writeConcern: {w: 2}}));
+});
+assert.eq(diff.primary.insert, 1);
+assert.eq(diff.secondary.insert, 1);
+
+// 3. Update a document.
+diff = getOpCountersDiff(() => {
+ assert.writeOK(primaryColl.update({_id: 0}, {$set: {a: 1}}, {writeConcern: {w: 2}}));
+});
+assert.eq(diff.primary.update, 1);
+assert.eq(diff.secondary.update, 1);
+
+// 4. Delete a document.
+diff = getOpCountersDiff(() => {
+ assert.writeOK(primaryColl.remove({_id: 0}, {writeConcern: {w: 2}}));
+});
+assert.eq(diff.primary.delete, 1);
+assert.eq(diff.secondary.delete, 1);
+
+// 5. Atomic insert operation via applyOps cmd.
+diff = getOpCountersDiff(() => {
+ assert.commandWorked(primaryColl.runCommand(
+ {applyOps: [{op: "i", ns: collNs, o: {_id: 1}}], writeConcern: {w: 2}}));
+});
+// On primary, the command counter accounts for the applyOps command and for other internal
+// commands like replSetUpdatePosition, replSetHeartbeat, serverStatus, etc.
+assert.gte(diff.primary.command, 1);
+assert.eq(diff.secondary.command, 0);
+assert.eq(diff.primary.insert, 1);
+assert.eq(diff.secondary.insert, 1);
+
+rst.stopSet();
})();
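
The before/after subtraction inside getOpCountersDiff generalizes to any serverStatus counter section. The idiom on its own, as a sketch:

    // Sketch: diff two opcounter snapshots taken around an operation. Keys are
    // the serverStatus opcounter names (insert, query, update, delete, getmore,
    // command).
    function diffCounters(before, after) {
        let diff = {};
        for (let key in before) {
            diff[key] = after[key] - before[key];
        }
        return diff;
    }
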
diff --git a/jstests/replsets/operation_time_read_and_write_concern.js b/jstests/replsets/operation_time_read_and_write_concern.js
index c1661db1d7e..59f6649ce59 100644
--- a/jstests/replsets/operation_time_read_and_write_concern.js
+++ b/jstests/replsets/operation_time_read_and_write_concern.js
@@ -4,124 +4,123 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- // Skip db hash check because replication is stopped on secondaries.
- TestData.skipCheckDBHashes = true;
+// Skip db hash check because replication is stopped on secondaries.
+TestData.skipCheckDBHashes = true;
- load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
- load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries,
- // restartReplicationOnSecondaries
- var name = "operation_time_read_and_write_concern";
+load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
+load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries,
+ // restartReplicationOnSecondaries
+var name = "operation_time_read_and_write_concern";
- var replTest = new ReplSetTest(
- {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ""}, waitForKeys: true});
+var replTest = new ReplSetTest(
+ {name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ""}, waitForKeys: true});
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- replTest.stopSet();
- return;
- }
- replTest.initiate();
-
- var res;
- var testDB = replTest.getPrimary().getDB(name);
- var collectionName = "foo";
-
- // readConcern level majority:
- // operationTime is the cluster time of the last committed op in the oplog.
- jsTestLog("Testing operationTime for readConcern level majority with afterClusterTime.");
- var majorityDoc = {_id: 10, x: 1};
- var localDoc = {_id: 15, x: 2};
-
- res = assert.commandWorked(testDB.runCommand(
- {insert: collectionName, documents: [majorityDoc], writeConcern: {w: "majority"}}));
- var majorityWriteOperationTime = res.operationTime;
-
- stopReplicationOnSecondaries(replTest);
-
- res = assert.commandWorked(
- testDB.runCommand({insert: collectionName, documents: [localDoc], writeConcern: {w: 1}}));
- var localWriteOperationTime = res.operationTime;
-
- assert.gt(localWriteOperationTime, majorityWriteOperationTime);
-
- res = assert.commandWorked(testDB.runCommand({
- find: collectionName,
- readConcern: {level: "majority", afterClusterTime: majorityWriteOperationTime}
- }));
- var majorityReadOperationTime = res.operationTime;
-
- assert.eq(res.cursor.firstBatch,
- [majorityDoc],
- "only the committed document, " + tojson(majorityDoc) +
- ", should be returned for the majority read with afterClusterTime: " +
- majorityWriteOperationTime);
- assert.eq(majorityReadOperationTime,
- majorityWriteOperationTime,
- "the operationTime of the majority read, " + majorityReadOperationTime +
- ", should be the cluster time of the last committed op in the oplog, " +
- majorityWriteOperationTime);
-
- // Validate that after replication, the local write data is now returned by the same query.
- restartReplicationOnSecondaries(replTest);
- replTest.awaitLastOpCommitted();
-
- res = assert.commandWorked(testDB.runCommand({
- find: collectionName,
- sort: {_id: 1}, // So the order of the documents is defined for testing.
- readConcern: {level: "majority", afterClusterTime: majorityWriteOperationTime}
- }));
- var secondMajorityReadOperationTime = res.operationTime;
-
- assert.eq(res.cursor.firstBatch,
- [majorityDoc, localDoc],
- "expected both inserted documents, " + tojson([majorityDoc, localDoc]) +
- ", to be returned for the second majority read with afterClusterTime: " +
- majorityWriteOperationTime);
- assert.eq(secondMajorityReadOperationTime,
- localWriteOperationTime,
- "the operationTime of the second majority read, " + secondMajorityReadOperationTime +
- ", should be the cluster time of the replicated local write, " +
- localWriteOperationTime);
-
- // readConcern level linearizable is not currently supported.
- jsTestLog("Verifying readConcern linearizable with afterClusterTime is not supported.");
- res = assert.commandFailedWithCode(
- testDB.runCommand({
- find: collectionName,
- filter: localDoc,
- readConcern: {level: "linearizable", afterClusterTime: majorityReadOperationTime}
- }),
- ErrorCodes.InvalidOptions,
- "linearizable reads with afterClusterTime are not supported and should not be allowed");
-
- // writeConcern level majority:
- // operationTime is the cluster time of the write if it succeeds, or of the previous successful
- // write at the time the write was determined to have failed, or a no-op.
- jsTestLog("Testing operationTime for writeConcern level majority.");
- var successfulDoc = {_id: 1000, y: 1};
- var failedDoc = {_id: 1000, y: 2};
-
- res = assert.commandWorked(testDB.runCommand(
- {insert: collectionName, documents: [successfulDoc], writeConcern: {w: "majority"}}));
- var majorityWriteOperationTime = res.operationTime;
-
- stopReplicationOnSecondaries(replTest);
-
- res = testDB.runCommand({
- insert: collectionName,
- documents: [failedDoc],
- writeConcern: {w: "majority", wtimeout: 1000}
- });
- assert.eq(res.writeErrors[0].code, ErrorCodes.DuplicateKey);
- var failedWriteOperationTime = res.operationTime;
-
- assert.eq(
- failedWriteOperationTime,
- majorityWriteOperationTime,
- "the operationTime of the failed majority write, " + failedWriteOperationTime +
- ", should be the cluster time of the last successful write at the time it failed, " +
- majorityWriteOperationTime);
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
replTest.stopSet();
+ return;
+}
+replTest.initiate();
+
+var res;
+var testDB = replTest.getPrimary().getDB(name);
+var collectionName = "foo";
+
+// readConcern level majority:
+// operationTime is the cluster time of the last committed op in the oplog.
+jsTestLog("Testing operationTime for readConcern level majority with afterClusterTime.");
+var majorityDoc = {_id: 10, x: 1};
+var localDoc = {_id: 15, x: 2};
+
+res = assert.commandWorked(testDB.runCommand(
+ {insert: collectionName, documents: [majorityDoc], writeConcern: {w: "majority"}}));
+var majorityWriteOperationTime = res.operationTime;
+
+stopReplicationOnSecondaries(replTest);
+
+res = assert.commandWorked(
+ testDB.runCommand({insert: collectionName, documents: [localDoc], writeConcern: {w: 1}}));
+var localWriteOperationTime = res.operationTime;
+
+assert.gt(localWriteOperationTime, majorityWriteOperationTime);
+
+res = assert.commandWorked(testDB.runCommand({
+ find: collectionName,
+ readConcern: {level: "majority", afterClusterTime: majorityWriteOperationTime}
+}));
+var majorityReadOperationTime = res.operationTime;
+
+assert.eq(res.cursor.firstBatch,
+ [majorityDoc],
+ "only the committed document, " + tojson(majorityDoc) +
+ ", should be returned for the majority read with afterClusterTime: " +
+ majorityWriteOperationTime);
+assert.eq(majorityReadOperationTime,
+ majorityWriteOperationTime,
+ "the operationTime of the majority read, " + majorityReadOperationTime +
+ ", should be the cluster time of the last committed op in the oplog, " +
+ majorityWriteOperationTime);
+
+// Validate that after replication, the local write data is now returned by the same query.
+restartReplicationOnSecondaries(replTest);
+replTest.awaitLastOpCommitted();
+
+res = assert.commandWorked(testDB.runCommand({
+ find: collectionName,
+ sort: {_id: 1}, // So the order of the documents is defined for testing.
+ readConcern: {level: "majority", afterClusterTime: majorityWriteOperationTime}
+}));
+var secondMajorityReadOperationTime = res.operationTime;
+
+assert.eq(res.cursor.firstBatch,
+ [majorityDoc, localDoc],
+ "expected both inserted documents, " + tojson([majorityDoc, localDoc]) +
+ ", to be returned for the second majority read with afterClusterTime: " +
+ majorityWriteOperationTime);
+assert.eq(secondMajorityReadOperationTime,
+ localWriteOperationTime,
+ "the operationTime of the second majority read, " + secondMajorityReadOperationTime +
+ ", should be the cluster time of the replicated local write, " +
+ localWriteOperationTime);
+
+// readConcern level linearizable is not currently supported.
+jsTestLog("Verifying readConcern linearizable with afterClusterTime is not supported.");
+res = assert.commandFailedWithCode(
+ testDB.runCommand({
+ find: collectionName,
+ filter: localDoc,
+ readConcern: {level: "linearizable", afterClusterTime: majorityReadOperationTime}
+ }),
+ ErrorCodes.InvalidOptions,
+ "linearizable reads with afterClusterTime are not supported and should not be allowed");
+
+// writeConcern level majority:
+// operationTime is the cluster time of the write if it succeeds, or of the previous successful
+// write at the time the write was determined to have failed, or a no-op.
+jsTestLog("Testing operationTime for writeConcern level majority.");
+var successfulDoc = {_id: 1000, y: 1};
+var failedDoc = {_id: 1000, y: 2};
+
+res = assert.commandWorked(testDB.runCommand(
+ {insert: collectionName, documents: [successfulDoc], writeConcern: {w: "majority"}}));
+var majorityWriteOperationTime = res.operationTime;
+
+stopReplicationOnSecondaries(replTest);
+
+res = testDB.runCommand({
+ insert: collectionName,
+ documents: [failedDoc],
+ writeConcern: {w: "majority", wtimeout: 1000}
+});
+assert.eq(res.writeErrors[0].code, ErrorCodes.DuplicateKey);
+var failedWriteOperationTime = res.operationTime;
+
+assert.eq(failedWriteOperationTime,
+ majorityWriteOperationTime,
+ "the operationTime of the failed majority write, " + failedWriteOperationTime +
+ ", should be the cluster time of the last successful write at the time it failed, " +
+ majorityWriteOperationTime);
+replTest.stopSet();
})();
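
The invariant at the heart of the read-concern half of this test can be restated compactly. A sketch, assuming testDB as above and no concurrent writes advancing the commit point (the collection name is illustrative):

    // Sketch: a majority write's operationTime matches the operationTime of a
    // majority read issued at that cluster time.
    var writeRes = assert.commandWorked(testDB.runCommand(
        {insert: "foo", documents: [{_id: 10, x: 1}], writeConcern: {w: "majority"}}));
    var readRes = assert.commandWorked(testDB.runCommand({
        find: "foo",
        readConcern: {level: "majority", afterClusterTime: writeRes.operationTime}
    }));
    assert.eq(readRes.operationTime, writeRes.operationTime);
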
diff --git a/jstests/replsets/oplog_format_create_indexes.js b/jstests/replsets/oplog_format_create_indexes.js
index 0697df44b07..117d5be04bb 100644
--- a/jstests/replsets/oplog_format_create_indexes.js
+++ b/jstests/replsets/oplog_format_create_indexes.js
@@ -3,78 +3,78 @@
* creation.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/get_index_helpers.js");
+load("jstests/libs/get_index_helpers.js");
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
+const primary = rst.getPrimary();
- const testDB = primary.getDB("test");
- const oplogColl = primary.getDB("local").oplog.rs;
+const testDB = primary.getDB("test");
+const oplogColl = primary.getDB("local").oplog.rs;
- function testOplogEntryContainsIndexInfoObj(coll, keyPattern, indexOptions) {
- assert.commandWorked(coll.createIndex(keyPattern, indexOptions));
- const allIndexes = coll.getIndexes();
- const indexSpec = GetIndexHelpers.findByKeyPattern(allIndexes, keyPattern);
+function testOplogEntryContainsIndexInfoObj(coll, keyPattern, indexOptions) {
+ assert.commandWorked(coll.createIndex(keyPattern, indexOptions));
+ const allIndexes = coll.getIndexes();
+ const indexSpec = GetIndexHelpers.findByKeyPattern(allIndexes, keyPattern);
- assert.neq(
- null,
- indexSpec,
- "Index with key pattern " + tojson(keyPattern) + " not found: " + tojson(allIndexes));
+ assert.neq(
+ null,
+ indexSpec,
+ "Index with key pattern " + tojson(keyPattern) + " not found: " + tojson(allIndexes));
- // Find the createIndexes command entries.
- const indexCreationOplogQuery = {
- op: "c",
- ns: testDB.getName() + ".$cmd", "o.createIndexes": coll.getName()
- };
+ // Find the createIndexes command entries.
+ const indexCreationOplogQuery = {
+ op: "c",
+ ns: testDB.getName() + ".$cmd",
+ "o.createIndexes": coll.getName()
+ };
- const allOplogEntries = oplogColl.find(indexCreationOplogQuery).toArray();
+ const allOplogEntries = oplogColl.find(indexCreationOplogQuery).toArray();
- // Preserve the JSON version of the originals, as we're going to delete fields.
- const allOplogEntriesJson = tojson(allOplogEntries);
- const indexSpecJson = tojson(indexSpec);
+ // Preserve the JSON version of the originals, as we're going to delete fields.
+ const allOplogEntriesJson = tojson(allOplogEntries);
+ const indexSpecJson = tojson(indexSpec);
- // Because of differences between the new and old oplog entries for createIndexes,
- // treat the namespace part separately and compare entries without ns field.
- const indexSpecNs = indexSpec.ns;
- delete indexSpec.ns;
- const found = allOplogEntries.filter((entry) => {
- const entryNs = entry.o.ns || testDB.getName() + "." + entry.o.createIndexes;
- const entrySpec = entry.o;
- delete entrySpec.ns;
- delete entrySpec.createIndexes;
- return indexSpecNs === entryNs && bsonWoCompare(indexSpec, entrySpec) === 0;
- });
- assert.eq(1,
- found.length,
- "Failed to find full index specification " + indexSpecJson +
- " in any oplog entry from index creation: " + allOplogEntriesJson);
+ // Because of differences between the new and old oplog entries for createIndexes,
+ // treat the namespace part separately and compare entries without ns field.
+ const indexSpecNs = indexSpec.ns;
+ delete indexSpec.ns;
+ const found = allOplogEntries.filter((entry) => {
+ const entryNs = entry.o.ns || testDB.getName() + "." + entry.o.createIndexes;
+ const entrySpec = entry.o;
+ delete entrySpec.ns;
+ delete entrySpec.createIndexes;
+ return indexSpecNs === entryNs && bsonWoCompare(indexSpec, entrySpec) === 0;
+ });
+ assert.eq(1,
+ found.length,
+ "Failed to find full index specification " + indexSpecJson +
+ " in any oplog entry from index creation: " + allOplogEntriesJson);
- assert.commandWorked(coll.dropIndex(keyPattern));
- }
+ assert.commandWorked(coll.dropIndex(keyPattern));
+}
- // Test that options both explicitly included in the command and implicitly filled in with
- // defaults by the server are serialized into the corresponding oplog entry.
- testOplogEntryContainsIndexInfoObj(testDB.oplog_format, {withoutAnyOptions: 1});
- testOplogEntryContainsIndexInfoObj(testDB.oplog_format, {withV1: 1}, {v: 1});
- testOplogEntryContainsIndexInfoObj(testDB.oplog_format,
- {partialIndex: 1},
- {partialFilterExpression: {field: {$exists: true}}});
+// Test that options both explicitly included in the command and implicitly filled in with
+// defaults by the server are serialized into the corresponding oplog entry.
+testOplogEntryContainsIndexInfoObj(testDB.oplog_format, {withoutAnyOptions: 1});
+testOplogEntryContainsIndexInfoObj(testDB.oplog_format, {withV1: 1}, {v: 1});
+testOplogEntryContainsIndexInfoObj(
+ testDB.oplog_format, {partialIndex: 1}, {partialFilterExpression: {field: {$exists: true}}});
- // Test that the representation of an index's collation in the oplog on a collection with a
- // non-simple default collation exactly matches that of the index's full specification.
- assert.commandWorked(
- testDB.runCommand({create: "oplog_format_collation", collation: {locale: "fr"}}));
- testOplogEntryContainsIndexInfoObj(testDB.oplog_format_collation, {withDefaultCollation: 1});
- testOplogEntryContainsIndexInfoObj(
- testDB.oplog_format_collation, {withNonDefaultCollation: 1}, {collation: {locale: "en"}});
- testOplogEntryContainsIndexInfoObj(testDB.oplog_format_collation, {withV1: 1}, {v: 1});
- testOplogEntryContainsIndexInfoObj(
- testDB.oplog_format_collation, {withSimpleCollation: 1}, {collation: {locale: "simple"}});
+// Test that the representation of an index's collation in the oplog on a collection with a
+// non-simple default collation exactly matches that of the index's full specification.
+assert.commandWorked(
+ testDB.runCommand({create: "oplog_format_collation", collation: {locale: "fr"}}));
+testOplogEntryContainsIndexInfoObj(testDB.oplog_format_collation, {withDefaultCollation: 1});
+testOplogEntryContainsIndexInfoObj(
+ testDB.oplog_format_collation, {withNonDefaultCollation: 1}, {collation: {locale: "en"}});
+testOplogEntryContainsIndexInfoObj(testDB.oplog_format_collation, {withV1: 1}, {v: 1});
+testOplogEntryContainsIndexInfoObj(
+ testDB.oplog_format_collation, {withSimpleCollation: 1}, {collation: {locale: "simple"}});
- rst.stopSet();
+rst.stopSet();
})();
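
The oplog query assembled inside the helper is also useful interactively. A sketch, with the database and collection names as placeholders:

    // Sketch: list the createIndexes oplog entries for a collection.
    var oplog = primary.getDB("local").oplog.rs;
    oplog.find({op: "c", ns: "test.$cmd", "o.createIndexes": "oplog_format"})
        .forEach(function(entry) {
            printjson(entry.o);  // The full index spec recorded in the oplog.
        });
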
diff --git a/jstests/replsets/oplog_replay_on_startup_with_bad_op.js b/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
index ee88b7a4a98..d9d3afca25d 100644
--- a/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
+++ b/jstests/replsets/oplog_replay_on_startup_with_bad_op.js
@@ -4,62 +4,62 @@
//
// @tags: [requires_persistence]
(function() {
- "use strict";
+"use strict";
- var rst = new ReplSetTest({
- nodes: 1,
- });
+var rst = new ReplSetTest({
+ nodes: 1,
+});
- rst.startSet();
- rst.initiate();
+rst.startSet();
+rst.initiate();
- var conn = rst.getPrimary(); // Waits for PRIMARY state.
- conn = rst.restart(0, {noReplSet: true}); // Restart as a standalone node.
- assert.neq(null, conn, "failed to restart");
+var conn = rst.getPrimary(); // Waits for PRIMARY state.
+conn = rst.restart(0, {noReplSet: true}); // Restart as a standalone node.
+assert.neq(null, conn, "failed to restart");
- var oplog = conn.getCollection('local.oplog.rs');
- var lastOplogDoc = conn.getCollection('local.oplog.rs').find().sort({$natural: -1}).limit(1)[0];
- var lastTs = lastOplogDoc.ts;
- var newTs = Timestamp(lastTs.t + 1, 1);
- var term = lastOplogDoc.t;
+var oplog = conn.getCollection('local.oplog.rs');
+var lastOplogDoc = conn.getCollection('local.oplog.rs').find().sort({$natural: -1}).limit(1)[0];
+var lastTs = lastOplogDoc.ts;
+var newTs = Timestamp(lastTs.t + 1, 1);
+var term = lastOplogDoc.t;
- assert.writeOK(oplog.insert({
- ts: newTs,
- t: term,
- h: 1,
- op: 'c',
- ns: 'somedb.$cmd',
- o: {thereIsNoCommandWithThisName: 1},
- }));
-
- var injectedMinValidDoc = {
- _id: ObjectId(),
+assert.writeOK(oplog.insert({
+ ts: newTs,
+ t: term,
+ h: 1,
+ op: 'c',
+ ns: 'somedb.$cmd',
+ o: {thereIsNoCommandWithThisName: 1},
+}));
- // appliedThrough
- begin: {
- ts: lastTs,
- t: term,
- },
+var injectedMinValidDoc = {
+ _id: ObjectId(),
- // minvalid:
+ // appliedThrough
+ begin: {
+ ts: lastTs,
t: term,
- ts: newTs,
- };
+ },
+
+ // minvalid:
+ t: term,
+ ts: newTs,
+};
- // This weird mechanism is the only way to bypass mongod's attempt to fill in null
- // Timestamps.
- var minValidColl = conn.getCollection('local.replset.minvalid');
- assert.writeOK(minValidColl.remove({}));
- assert.writeOK(minValidColl.update({}, {$set: injectedMinValidDoc}, {upsert: true}));
- assert.eq(minValidColl.findOne(),
- injectedMinValidDoc,
- "If the Timestamps differ, the server may be filling in the null timestamps");
+// This weird mechanism is the only way to bypass mongod's attempt to fill in null
+// Timestamps.
+var minValidColl = conn.getCollection('local.replset.minvalid');
+assert.writeOK(minValidColl.remove({}));
+assert.writeOK(minValidColl.update({}, {$set: injectedMinValidDoc}, {upsert: true}));
+assert.eq(minValidColl.findOne(),
+ injectedMinValidDoc,
+ "If the Timestamps differ, the server may be filling in the null timestamps");
- assert.throws(() => rst.restart(0)); // Restart in replSet mode again.
+assert.throws(() => rst.restart(0)); // Restart in replSet mode again.
- // fassert() calls std::abort(), which returns a different exit code for Windows vs. other
- // platforms.
- const exitCode = _isWindows() ? MongoRunner.EXIT_ABRUPT : MongoRunner.EXIT_ABORT;
- rst.stop(0, undefined, {allowedExitCode: exitCode});
- rst.stopSet();
+// fassert() calls std::abort(), which returns a different exit code for Windows vs. other
+// platforms.
+const exitCode = _isWindows() ? MongoRunner.EXIT_ABRUPT : MongoRunner.EXIT_ABORT;
+rst.stop(0, undefined, {allowedExitCode: exitCode});
+rst.stopSet();
})();
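
The timestamp arithmetic is the piece of this test most likely to be reused elsewhere. As a sketch, with conn a standalone connection as above:

    // Sketch: derive a timestamp strictly after the newest oplog entry.
    var last = conn.getCollection("local.oplog.rs")
                   .find().sort({$natural: -1}).limit(1)[0];
    var nextTs = Timestamp(last.ts.t + 1, 1);  // One second past the last entry.
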
diff --git a/jstests/replsets/oplog_rollover.js b/jstests/replsets/oplog_rollover.js
index e5532585ab7..de6e83f1354 100644
--- a/jstests/replsets/oplog_rollover.js
+++ b/jstests/replsets/oplog_rollover.js
@@ -1,132 +1,129 @@
/**
* Test that oplog (on both primary and secondary) rolls over when its size exceeds the configured
* maximum. This test runs on wiredTiger storage engine and inMemory storage engine (if available).
-*/
+ */
(function() {
- "use strict";
+"use strict";
- function doTest(storageEngine) {
- jsTestLog("Testing with storageEngine: " + storageEngine);
+function doTest(storageEngine) {
+ jsTestLog("Testing with storageEngine: " + storageEngine);
- const replSet = new ReplSetTest({
- // Set the syncdelay to 1s to speed up checkpointing.
- nodeOptions: {syncdelay: 1},
- nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
- });
- // Set max oplog size to 1MB.
- replSet.startSet({storageEngine: storageEngine, oplogSize: 1});
- replSet.initiate();
+ const replSet = new ReplSetTest({
+ // Set the syncdelay to 1s to speed up checkpointing.
+ nodeOptions: {syncdelay: 1},
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
+ });
+ // Set max oplog size to 1MB.
+ replSet.startSet({storageEngine: storageEngine, oplogSize: 1});
+ replSet.initiate();
- const primary = replSet.getPrimary();
- const primaryOplog = primary.getDB("local").oplog.rs;
- const secondary = replSet.getSecondary();
- const secondaryOplog = secondary.getDB("local").oplog.rs;
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ const secondary = replSet.getSecondary();
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
- const coll = primary.getDB("test").foo;
- // 400KB each so that oplog can keep at most two insert oplog entries.
- const longString = new Array(400 * 1024).join("a");
+ const coll = primary.getDB("test").foo;
+ // 400KB each so that oplog can keep at most two insert oplog entries.
+ const longString = new Array(400 * 1024).join("a");
- function numInsertOplogEntry(oplog) {
- return oplog.find({op: "i", "ns": "test.foo"}).itcount();
- }
-
- // Insert the first document.
- assert.commandWorked(coll.insert({_id: 0, longString: longString}, {writeConcern: {w: 2}}));
- // Test that oplog entry of the first insert exists on both primary and secondary.
- assert.eq(1, numInsertOplogEntry(primaryOplog));
- assert.eq(1, numInsertOplogEntry(secondaryOplog));
-
- // Insert the second document.
- const secondInsertTimestamp =
- assert
- .commandWorked(coll.runCommand(
- "insert",
- {documents: [{_id: 1, longString: longString}], writeConcern: {w: 2}}))
- .operationTime;
- // Test that oplog entries of both inserts exist on both primary and secondary.
- assert.eq(2, numInsertOplogEntry(primaryOplog));
- assert.eq(2, numInsertOplogEntry(secondaryOplog));
+ function numInsertOplogEntry(oplog) {
+ return oplog.find({op: "i", "ns": "test.foo"}).itcount();
+ }
-        // When enableMajorityReadConcern=true, run a finer-grained test that also verifies
-        // oplog truncation happens at the expected time. When enableMajorityReadConcern=false
-        // the lastStableRecoveryTimestamp is not available, so fall back to a coarser-grained
-        // mode that only verifies oplog truncation eventually happens once the oplog size
-        // exceeds the configured maximum.
- if (primary.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
- // Wait for checkpointing/stable timestamp to catch up with the second insert so oplog
- // entry of the first insert is allowed to be deleted by the oplog cap maintainer thread
- // when a new oplog stone is created. "inMemory" WT engine does not run checkpoint
- // thread and lastStableRecoveryTimestamp is the stable timestamp in this case.
- assert.soon(
- () => {
- const primaryTimestamp =
- assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}))
- .lastStableRecoveryTimestamp;
- const secondaryTimestamp =
- assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}))
- .lastStableRecoveryTimestamp;
- if (primaryTimestamp >= secondInsertTimestamp &&
- secondaryTimestamp >= secondInsertTimestamp) {
- return true;
- } else {
- jsTestLog(
- "Awaiting last stable recovery timestamp " +
- `(primary: ${primaryTimestamp}, secondary: ${secondaryTimestamp}) ` +
- `target: ${secondInsertTimestamp}`);
- return false;
- }
- },
- "Timeout waiting for checkpointing to catch up with the second insert",
- ReplSetTest.kDefaultTimeoutMS,
- 2000);
+ // Insert the first document.
+ assert.commandWorked(coll.insert({_id: 0, longString: longString}, {writeConcern: {w: 2}}));
+ // Test that oplog entry of the first insert exists on both primary and secondary.
+ assert.eq(1, numInsertOplogEntry(primaryOplog));
+ assert.eq(1, numInsertOplogEntry(secondaryOplog));
- // Insert the third document which will trigger a new oplog stone to be created. The
- // oplog cap maintainer thread will then be unblocked on the creation of the new oplog
- // stone and will start truncating oplog entries. The oplog entry for the first
- // insert will be truncated after the oplog cap maintainer thread finishes.
- assert.commandWorked(
- coll.insert({_id: 2, longString: longString}, {writeConcern: {w: 2}}));
+ // Insert the second document.
+ const secondInsertTimestamp =
+ assert
+ .commandWorked(coll.runCommand(
+ "insert", {documents: [{_id: 1, longString: longString}], writeConcern: {w: 2}}))
+ .operationTime;
+ // Test that oplog entries of both inserts exist on both primary and secondary.
+ assert.eq(2, numInsertOplogEntry(primaryOplog));
+ assert.eq(2, numInsertOplogEntry(secondaryOplog));
- // Test that oplog entry of the initial insert rolls over on both primary and secondary.
- // Use assert.soon to wait for oplog cap maintainer thread to run.
- assert.soon(() => {
- return numInsertOplogEntry(primaryOplog) === 2;
- }, "Timeout waiting for oplog to roll over on primary");
- assert.soon(() => {
- return numInsertOplogEntry(secondaryOplog) === 2;
- }, "Timeout waiting for oplog to roll over on secondary");
- } else {
- // Only test that oplog truncation will eventually happen.
- let numInserted = 2;
- assert.soon(function() {
- // Insert more documents.
- assert.commandWorked(coll.insert({_id: numInserted++, longString: longString},
- {writeConcern: {w: 2}}));
- const numInsertOplogEntryPrimary = numInsertOplogEntry(primaryOplog);
- const numInsertOplogEntrySecondary = numInsertOplogEntry(secondaryOplog);
- // Oplog has been truncated if the number of insert oplog entries is less than
- // number of inserted.
- if (numInsertOplogEntryPrimary < numInserted &&
- numInsertOplogEntrySecondary < numInserted)
+    // When enableMajorityReadConcern=true, run a finer-grained test that also verifies
+    // oplog truncation happens at the expected time. When enableMajorityReadConcern=false
+    // the lastStableRecoveryTimestamp is not available, so fall back to a coarser-grained
+    // mode that only verifies oplog truncation eventually happens once the oplog size
+    // exceeds the configured maximum.
+ if (primary.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
+ // Wait for checkpointing/stable timestamp to catch up with the second insert so oplog
+ // entry of the first insert is allowed to be deleted by the oplog cap maintainer thread
+ // when a new oplog stone is created. "inMemory" WT engine does not run checkpoint
+ // thread and lastStableRecoveryTimestamp is the stable timestamp in this case.
+ assert.soon(
+ () => {
+ const primaryTimestamp =
+ assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}))
+ .lastStableRecoveryTimestamp;
+ const secondaryTimestamp =
+ assert.commandWorked(secondary.adminCommand({replSetGetStatus: 1}))
+ .lastStableRecoveryTimestamp;
+ if (primaryTimestamp >= secondInsertTimestamp &&
+ secondaryTimestamp >= secondInsertTimestamp) {
return true;
- jsTestLog("Awaiting oplog truncation: number of oplog entries: " +
- `(primary: ${numInsertOplogEntryPrimary}, ` +
- `secondary: ${numInsertOplogEntrySecondary}) ` +
- `number inserted: ${numInserted}`);
- return false;
- }, "Timeout waiting for oplog to roll over", ReplSetTest.kDefaultTimeoutMS, 1000);
- }
+ } else {
+ jsTestLog("Awaiting last stable recovery timestamp " +
+ `(primary: ${primaryTimestamp}, secondary: ${secondaryTimestamp}) ` +
+ `target: ${secondInsertTimestamp}`);
+ return false;
+ }
+ },
+ "Timeout waiting for checkpointing to catch up with the second insert",
+ ReplSetTest.kDefaultTimeoutMS,
+ 2000);
- replSet.stopSet();
+ // Insert the third document which will trigger a new oplog stone to be created. The
+ // oplog cap maintainer thread will then be unblocked on the creation of the new oplog
+ // stone and will start truncating oplog entries. The oplog entry for the first
+ // insert will be truncated after the oplog cap maintainer thread finishes.
+ assert.commandWorked(coll.insert({_id: 2, longString: longString}, {writeConcern: {w: 2}}));
+
+ // Test that oplog entry of the initial insert rolls over on both primary and secondary.
+ // Use assert.soon to wait for oplog cap maintainer thread to run.
+ assert.soon(() => {
+ return numInsertOplogEntry(primaryOplog) === 2;
+ }, "Timeout waiting for oplog to roll over on primary");
+ assert.soon(() => {
+ return numInsertOplogEntry(secondaryOplog) === 2;
+ }, "Timeout waiting for oplog to roll over on secondary");
+ } else {
+ // Only test that oplog truncation will eventually happen.
+ let numInserted = 2;
+ assert.soon(function() {
+ // Insert more documents.
+ assert.commandWorked(
+ coll.insert({_id: numInserted++, longString: longString}, {writeConcern: {w: 2}}));
+ const numInsertOplogEntryPrimary = numInsertOplogEntry(primaryOplog);
+ const numInsertOplogEntrySecondary = numInsertOplogEntry(secondaryOplog);
+            // Oplog has been truncated if the number of insert oplog entries is less than
+            // the number inserted.
+ if (numInsertOplogEntryPrimary < numInserted &&
+ numInsertOplogEntrySecondary < numInserted)
+ return true;
+ jsTestLog("Awaiting oplog truncation: number of oplog entries: " +
+ `(primary: ${numInsertOplogEntryPrimary}, ` +
+ `secondary: ${numInsertOplogEntrySecondary}) ` +
+ `number inserted: ${numInserted}`);
+ return false;
+ }, "Timeout waiting for oplog to roll over", ReplSetTest.kDefaultTimeoutMS, 1000);
}
- doTest("wiredTiger");
+ replSet.stopSet();
+}
- if (jsTest.options().storageEngine !== "inMemory") {
- jsTestLog(
- "Skipping inMemory test because inMemory storageEngine was not compiled into the server.");
- return;
- }
+doTest("wiredTiger");
+
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTestLog(
+ "Skipping inMemory test because inMemory storageEngine was not compiled into the server.");
+ return;
+}
- doTest("inMemory");
+doTest("inMemory");
})();
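
Both branches of the test wait for truncation with the same polling pattern. Condensed, assuming the oplog handle and expected count from the fine-grained branch:

    // Sketch: wait until the oplog cap maintainer thread has truncated the
    // oldest insert entry, leaving only the two newest ones.
    assert.soon(function() {
        return primaryOplog.find({op: "i", ns: "test.foo"}).itcount() === 2;
    }, "Timeout waiting for oplog to roll over on primary");
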
diff --git a/jstests/replsets/oplog_term.js b/jstests/replsets/oplog_term.js
index 93b650ab637..f21e01f4a98 100644
--- a/jstests/replsets/oplog_term.js
+++ b/jstests/replsets/oplog_term.js
@@ -1,34 +1,33 @@
// Term counter should be present in oplog entries under protocol version 1.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
+'use strict';
+load('jstests/replsets/rslib.js');
- var name = 'oplog_term';
- var replSet = new ReplSetTest({name: name, nodes: 1});
- replSet.startSet();
- replSet.initiate();
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000);
+var name = 'oplog_term';
+var replSet = new ReplSetTest({name: name, nodes: 1});
+replSet.startSet();
+replSet.initiate();
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000);
-    // Default protocol version is 1 - 'term' field should be present in oplog entry.
- var primary = replSet.getPrimary();
- var collection = primary.getDB('test').getCollection(name);
- assert.writeOK(collection.save({_id: 1}));
+// Default protocol version is 1 - 'term' field should be present in oplog entry.
+var primary = replSet.getPrimary();
+var collection = primary.getDB('test').getCollection(name);
+assert.writeOK(collection.save({_id: 1}));
- var oplogEntry = getLatestOp(primary);
- assert(oplogEntry, 'unexpected empty oplog');
- assert.eq(collection.getFullName(),
- oplogEntry.ns,
- 'unexpected namespace in oplog entry: ' + tojson(oplogEntry));
- assert.eq(
- 1,
- oplogEntry.o._id,
- 'oplog entry does not refer to most recently inserted document: ' + tojson(oplogEntry));
- assert(oplogEntry.hasOwnProperty('t'), 'oplog entry must contain term: ' + tojson(oplogEntry));
+var oplogEntry = getLatestOp(primary);
+assert(oplogEntry, 'unexpected empty oplog');
+assert.eq(collection.getFullName(),
+ oplogEntry.ns,
+ 'unexpected namespace in oplog entry: ' + tojson(oplogEntry));
+assert.eq(1,
+ oplogEntry.o._id,
+ 'oplog entry does not refer to most recently inserted document: ' + tojson(oplogEntry));
+assert(oplogEntry.hasOwnProperty('t'), 'oplog entry must contain term: ' + tojson(oplogEntry));
- var status = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}));
- assert.eq(status.term,
- oplogEntry.t,
- 'term in oplog entry does not match term in status: ' + tojson(oplogEntry));
+var status = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}));
+assert.eq(status.term,
+ oplogEntry.t,
+ 'term in oplog entry does not match term in status: ' + tojson(oplogEntry));
- replSet.stopSet();
+replSet.stopSet();
})();
diff --git a/jstests/replsets/oplog_visibility.js b/jstests/replsets/oplog_visibility.js
index ccdcf5c6d93..2c3c49f839e 100644
--- a/jstests/replsets/oplog_visibility.js
+++ b/jstests/replsets/oplog_visibility.js
@@ -6,122 +6,122 @@
* @tags: [requires_document_locking]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/parallelTester.js"); // for ScopedThread.
+load("jstests/libs/parallelTester.js"); // for ScopedThread.
- const replTest = new ReplSetTest({
- name: "oplog_visibility",
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: true}
- });
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({
+ name: "oplog_visibility",
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: true}
+});
+replTest.startSet();
+replTest.initiate();
- jsTestLog("Enabling `sleepBeforeCommit` failpoint.");
- for (let node of replTest.nodes) {
- assert.commandWorked(node.adminCommand(
- {configureFailPoint: "sleepBeforeCommit", mode: {activationProbability: 0.01}}));
- }
+jsTestLog("Enabling `sleepBeforeCommit` failpoint.");
+for (let node of replTest.nodes) {
+ assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "sleepBeforeCommit", mode: {activationProbability: 0.01}}));
+}
- jsTestLog("Starting concurrent writers.");
- let stopLatch = new CountDownLatch(1);
- let writers = [];
- for (let idx = 0; idx < 2; ++idx) {
- let coll = "coll_" + idx;
- let writer = new ScopedThread(function(host, coll, stopLatch) {
- const conn = new Mongo(host);
- let id = 0;
+jsTestLog("Starting concurrent writers.");
+let stopLatch = new CountDownLatch(1);
+let writers = [];
+for (let idx = 0; idx < 2; ++idx) {
+ let coll = "coll_" + idx;
+ let writer = new ScopedThread(function(host, coll, stopLatch) {
+ const conn = new Mongo(host);
+ let id = 0;
- // Cap the amount of data being inserted to avoid rolling over a 10MiB oplog. It takes
- // ~70,000 "basic" ~150 byte oplog documents to fill a 10MiB oplog. Note this number is
- // for each of two writer threads.
- const maxDocsToInsert = 20 * 1000;
- while (stopLatch.getCount() > 0 && id < maxDocsToInsert) {
- conn.getDB("test").getCollection(coll).insert({_id: id});
- id++;
- }
- jsTestLog({"NumDocsWritten": id});
- }, replTest.getPrimary().host, coll, stopLatch);
+ // Cap the amount of data being inserted to avoid rolling over a 10MiB oplog. It takes
+ // ~70,000 "basic" ~150 byte oplog documents to fill a 10MiB oplog. Note this number is
+ // for each of two writer threads.
+ const maxDocsToInsert = 20 * 1000;
+ while (stopLatch.getCount() > 0 && id < maxDocsToInsert) {
+ conn.getDB("test").getCollection(coll).insert({_id: id});
+ id++;
+ }
+ jsTestLog({"NumDocsWritten": id});
+ }, replTest.getPrimary().host, coll, stopLatch);
- writer.start();
- writers.push(writer);
- }
+ writer.start();
+ writers.push(writer);
+}
- for (let node of replTest.nodes) {
- let testOplog = function(node) {
- let timestamps = [];
+for (let node of replTest.nodes) {
+ let testOplog = function(node) {
+ let timestamps = [];
- let local = node.getDB("local");
- let oplogStart =
- local.getCollection("oplog.rs").find().sort({$natural: -1}).limit(-1).next()["ts"];
- jsTestLog({"Node": node.host, "StartTs": oplogStart});
+ let local = node.getDB("local");
+ let oplogStart =
+ local.getCollection("oplog.rs").find().sort({$natural: -1}).limit(-1).next()["ts"];
+ jsTestLog({"Node": node.host, "StartTs": oplogStart});
- while (timestamps.length < 1000) {
- // Query with $gte to validate continuinity. Do not add this first record to the
- // recorded timestamps. Its value was already added in the last cursor.
- let cursor = local.getCollection("oplog.rs")
- .find({ts: {$gte: oplogStart}})
- .sort({$natural: 1})
- .tailable(true)
- .batchSize(100);
- assert(cursor.hasNext());
- assert.eq(oplogStart, cursor.next()["ts"]);
+ while (timestamps.length < 1000) {
+            // Query with $gte to validate continuity. Do not add this first record to the
+            // recorded timestamps; its value was already recorded by the previous cursor.
+ let cursor = local.getCollection("oplog.rs")
+ .find({ts: {$gte: oplogStart}})
+ .sort({$natural: 1})
+ .tailable(true)
+ .batchSize(100);
+ assert(cursor.hasNext());
+ assert.eq(oplogStart, cursor.next()["ts"]);
- // While this method wants to capture 1000 timestamps, the cursor has a batch size
- // of 100 and this loop makes 200 iterations before getting a new cursor from a
- // fresh query. The goal is to exercise getMores, which use different code paths
- // for establishing their oplog reader transactions.
- for (let num = 0; num < 200 && timestamps.length < 1000; ++num) {
- try {
- if (cursor.hasNext() == false) {
- break;
- }
- } catch (exc) {
+ // While this method wants to capture 1000 timestamps, the cursor has a batch size
+ // of 100 and this loop makes 200 iterations before getting a new cursor from a
+ // fresh query. The goal is to exercise getMores, which use different code paths
+ // for establishing their oplog reader transactions.
+ for (let num = 0; num < 200 && timestamps.length < 1000; ++num) {
+ try {
+ if (cursor.hasNext() == false) {
break;
}
- let ts = cursor.next()["ts"];
- timestamps.push(ts);
- oplogStart = ts;
+ } catch (exc) {
+ break;
}
+ let ts = cursor.next()["ts"];
+ timestamps.push(ts);
+ oplogStart = ts;
}
+ }
- jsTestLog({"Verifying": node.host, "StartTs": timestamps[0], "EndTs": timestamps[999]});
- oplogStart = timestamps[0];
- let cursor =
- local.getCollection("oplog.rs").find({ts: {$gte: oplogStart}}).sort({$natural: 1});
- for (let observedTsIdx in timestamps) {
- let observedTs = timestamps[observedTsIdx];
- assert(cursor.hasNext());
- let actualTs = cursor.next()["ts"];
- assert.eq(actualTs, observedTs, function() {
- let prev = null;
- let next = null;
- if (observedTsIdx > 0) {
- prev = timestamps[observedTsIdx - 1];
- }
- if (observedTsIdx + 1 < timestamps.length) {
- next = timestamps[observedTsIdx + 1];
- }
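+        // Re-scan the same range with a fresh non-tailable cursor and verify that every
+        // previously observed timestamp appears, in order, with no gaps.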
+ jsTestLog({"Verifying": node.host, "StartTs": timestamps[0], "EndTs": timestamps[999]});
+ oplogStart = timestamps[0];
+ let cursor =
+ local.getCollection("oplog.rs").find({ts: {$gte: oplogStart}}).sort({$natural: 1});
+ for (let observedTsIdx in timestamps) {
+ let observedTs = timestamps[observedTsIdx];
+ assert(cursor.hasNext());
+ let actualTs = cursor.next()["ts"];
+ assert.eq(actualTs, observedTs, function() {
+ let prev = null;
+ let next = null;
+ if (observedTsIdx > 0) {
+ prev = timestamps[observedTsIdx - 1];
+ }
+ if (observedTsIdx + 1 < timestamps.length) {
+ next = timestamps[observedTsIdx + 1];
+ }
- return tojson({
- "Missing": actualTs,
- "ObservedIdx": observedTsIdx,
- "PrevObserved": prev,
- "NextObserved": next
- });
+ return tojson({
+ "Missing": actualTs,
+ "ObservedIdx": observedTsIdx,
+ "PrevObserved": prev,
+ "NextObserved": next
});
- }
- };
+ });
+ }
+ };
- jsTestLog({"Testing": node.host});
- testOplog(node);
- }
- jsTestLog("Stopping writers.");
- stopLatch.countDown();
- writers.forEach((writer) => {
- writer.join();
- });
+ jsTestLog({"Testing": node.host});
+ testOplog(node);
+}
+jsTestLog("Stopping writers.");
+stopLatch.countDown();
+writers.forEach((writer) => {
+ writer.join();
+});
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/oplog_wallclock.js b/jstests/replsets/oplog_wallclock.js
index 2da05916c1f..a744c207d7e 100644
--- a/jstests/replsets/oplog_wallclock.js
+++ b/jstests/replsets/oplog_wallclock.js
@@ -1,30 +1,30 @@
// oplog should contain the field "wall" with wallClock timestamps.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
+'use strict';
+load('jstests/replsets/rslib.js');
- var assertLastOplogHasWT = function(primary, msg) {
- const opLogEntry = getLatestOp(primary);
- assert(opLogEntry.hasOwnProperty('wall'),
- 'oplog entry must contain wt field: ' + tojson(opLogEntry));
- };
+var assertLastOplogHasWT = function(primary, msg) {
+ const opLogEntry = getLatestOp(primary);
+ assert(opLogEntry.hasOwnProperty('wall'),
+           'oplog entry must contain wall field: ' + tojson(opLogEntry));
+};
- var name = 'wt_test_coll';
- var replSet = new ReplSetTest({nodes: 1, oplogSize: 2});
- replSet.startSet();
- replSet.initiate();
+var name = 'wt_test_coll';
+var replSet = new ReplSetTest({nodes: 1, oplogSize: 2});
+replSet.startSet();
+replSet.initiate();
- var primary = replSet.getPrimary();
- var collection = primary.getDB('test').getCollection(name);
+var primary = replSet.getPrimary();
+var collection = primary.getDB('test').getCollection(name);
- assert.writeOK(collection.insert({_id: 1, val: 'x'}));
- assertLastOplogHasWT(primary, 'insert');
+assert.writeOK(collection.insert({_id: 1, val: 'x'}));
+assertLastOplogHasWT(primary, 'insert');
- assert.writeOK(collection.update({_id: 1}, {val: 'y'}));
- assertLastOplogHasWT(primary, 'update');
+assert.writeOK(collection.update({_id: 1}, {val: 'y'}));
+assertLastOplogHasWT(primary, 'update');
- assert.writeOK(collection.remove({_id: 1}));
- assertLastOplogHasWT(primary, 'remove');
+assert.writeOK(collection.remove({_id: 1}));
+assertLastOplogHasWT(primary, 'remove');
- replSet.stopSet();
+replSet.stopSet();
})();
diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js
index acd72662574..867c6258441 100644
--- a/jstests/replsets/optime.js
+++ b/jstests/replsets/optime.js
@@ -40,11 +40,11 @@ function optimesAndWallTimesAreEqual(replTest, isPersistent) {
if (timestampCompare(prevOptime, currOptime) != 0 ||
wallTimeCompare(prevAppliedWallTime, currAppliedWallTime) != 0 ||
(isPersistent && wallTimeCompare(prevDurableWallTime, currDurableWallTime) != 0)) {
- jsTest.log("optimesAndWallTimesAreEqual returning false match, prevOptime: " +
- prevOptime + " latestOptime: " + currOptime + " prevAppliedWallTime: " +
- prevAppliedWallTime + " latestWallTime: " + currAppliedWallTime +
- " prevDurableWallTime: " + prevDurableWallTime + " latestDurableWallTime: " +
- currDurableWallTime);
+ jsTest.log(
+ "optimesAndWallTimesAreEqual returning false match, prevOptime: " + prevOptime +
+ " latestOptime: " + currOptime + " prevAppliedWallTime: " + prevAppliedWallTime +
+ " latestWallTime: " + currAppliedWallTime + " prevDurableWallTime: " +
+ prevDurableWallTime + " latestDurableWallTime: " + currDurableWallTime);
replTest.dumpOplog(replTest.nodes[i], {}, 20);
replTest.dumpOplog(replTest.nodes[i - 1], {}, 20);
return false;
diff --git a/jstests/replsets/prepare_conflict_read_concern_behavior.js b/jstests/replsets/prepare_conflict_read_concern_behavior.js
index a4c14ae0a46..22515e3eb73 100644
--- a/jstests/replsets/prepare_conflict_read_concern_behavior.js
+++ b/jstests/replsets/prepare_conflict_read_concern_behavior.js
@@ -20,330 +20,322 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- const conn = replTest.getPrimary();
-
- const failureTimeout = 1 * 1000; // 1 second.
- const successTimeout = 5 * 60 * 1000; // 5 minutes.
- const dbName = "test";
- const collName = "prepare_conflict_read_concern_behavior";
- const collName2 = "prepare_conflict_read_concern_behavior2";
- const testDB = conn.getDB(dbName);
- const testColl = testDB.getCollection(collName);
- const testColl2 = testDB.getCollection(collName2);
-
- const secondary = replTest.getSecondary();
- const secondaryTestDB = secondary.getDB(dbName);
-
- // Turn off timestamp reaping so that clusterTimeBeforePrepare doesn't get too old.
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+const conn = replTest.getPrimary();
+
+const failureTimeout = 1 * 1000; // 1 second.
+const successTimeout = 5 * 60 * 1000; // 5 minutes.
+const dbName = "test";
+const collName = "prepare_conflict_read_concern_behavior";
+const collName2 = "prepare_conflict_read_concern_behavior2";
+const testDB = conn.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+const testColl2 = testDB.getCollection(collName2);
+
+const secondary = replTest.getSecondary();
+const secondaryTestDB = secondary.getDB(dbName);
+
+// Turn off timestamp reaping so that clusterTimeBeforePrepare doesn't get too old.
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
+ mode: "alwaysOn",
+}));
+
+function runTest() {
+ testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+ assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+ testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}});
+ assert.commandWorked(testDB.runCommand({create: collName2, writeConcern: {w: "majority"}}));
+
+ const session = conn.startSession({causalConsistency: false});
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB.getCollection(collName);
+
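+    // Helper: run a find with the given read concern and maxTimeMS, optionally asserting how
+    // many documents outside the prepared transaction are returned.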
+ const read = function(read_concern, timeout, db, coll, num_expected) {
+ let res = db.runCommand({
+ find: coll,
+ filter: {in_prepared_txn: false},
+ readConcern: read_concern,
+ maxTimeMS: timeout,
+ });
+
+ if (num_expected) {
+ assert(res.cursor, tojson(res));
+ assert.eq(res.cursor.firstBatch.length, num_expected, tojson(res));
+ }
+ return res;
+ };
+
+ const dbHash = function(read_concern, db, timeout = successTimeout) {
+ let res = db.runCommand({
+ dbHash: 1,
+ readConcern: read_concern,
+ maxTimeMS: timeout,
+ });
+
+ return res;
+ };
+
+ const mapReduce = function(
+ read_concern, db, outOptions = {inline: 1}, timeout = successTimeout) {
+ let map = function() {
+ emit(this.a, this.a);
+ };
+ let reduce = function(key, vals) {
+ return 1;
+ };
+ let res = db.runCommand({
+ mapReduce: collName,
+ map: map,
+ reduce: reduce,
+ out: outOptions,
+ readConcern: read_concern,
+ maxTimeMS: timeout,
+ });
+ return res;
+ };
+
+ const validate = function(read_concern, db, timeout = successTimeout) {
+ let res = db.runCommand({
+ validate: collName,
+ readConcern: read_concern,
+ maxTimeMS: timeout,
+ });
+
+ return res;
+ };
+
+ assert.commandWorked(
+ testColl.insert({_id: 1, in_prepared_txn: false}, {writeConcern: {w: "majority"}}));
+ assert.commandWorked(testColl.insert({_id: 2, in_prepared_txn: false}));
+ assert.commandWorked(testColl2.insert({_id: 1, in_prepared_txn: false}));
+
+ session.startTransaction();
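+    // The operationTime of this in-transaction insert is a cluster time strictly earlier than
+    // the prepare timestamp (checked by the assert.gt below).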
+ const clusterTimeBeforePrepare =
+ assert.commandWorked(sessionColl.runCommand("insert", {documents: [{_id: 3}]}))
+ .operationTime;
+ assert.commandWorked(sessionColl.update({_id: 2}, {_id: 2, in_prepared_txn: true}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
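+    // A majority-acknowledged write outside the transaction yields a cluster time strictly
+    // later than the prepare timestamp (checked by the assert.gt below).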
+ const clusterTimeAfterPrepare =
+ assert
+ .commandWorked(testColl.runCommand(
+ "insert",
+ {documents: [{_id: 4, in_prepared_txn: false}], writeConcern: {w: "majority"}}))
+ .operationTime;
+
+ jsTestLog("prepareTimestamp: " + prepareTimestamp + " clusterTimeBeforePrepare: " +
+ clusterTimeBeforePrepare + " clusterTimeAfterPrepare: " + clusterTimeAfterPrepare);
+
+ assert.gt(prepareTimestamp, clusterTimeBeforePrepare);
+ assert.gt(clusterTimeAfterPrepare, prepareTimestamp);
+
+ jsTestLog("Test read with read concern 'majority' doesn't block on a prepared transaction.");
+ assert.commandWorked(read({level: 'majority'}, successTimeout, testDB, collName, 3));
+
+ jsTestLog("Test read with read concern 'local' doesn't block on a prepared transaction.");
+ assert.commandWorked(read({level: 'local'}, successTimeout, testDB, collName, 3));
+
+ jsTestLog("Test read with read concern 'available' doesn't block on a prepared transaction.");
+ assert.commandWorked(read({level: 'available'}, successTimeout, testDB, collName, 3));
+
+ jsTestLog("Test read with read concern 'linearizable' blocks on a prepared transaction.");
+ assert.commandFailedWithCode(read({level: 'linearizable'}, failureTimeout, testDB, collName),
+ ErrorCodes.MaxTimeMSExpired);
+
+ // TODO SERVER-36953: uncomment this test
+ // jsTestLog("Test afterClusterTime read before prepareTimestamp doesn't block on a " +
+ // "prepared transaction.");
+ // assert.commandWorked(read({level: 'local', afterClusterTime: clusterTimeBeforePrepare},
+ // successTimeout,
+ // testDB,
+ // collName,
+ // 2));
+
+ jsTestLog("Test afterClusterTime read after prepareTimestamp blocks on a prepared " +
+ "transaction.");
+ assert.commandFailedWithCode(read({level: 'local', afterClusterTime: clusterTimeAfterPrepare},
+ failureTimeout,
+ testDB,
+ collName),
+ ErrorCodes.MaxTimeMSExpired);
+
+ jsTestLog("Test read with afterClusterTime after prepareTimestamp on non-prepared " +
+ "documents doesn't block on a prepared transaction.");
+ assert.commandWorked(read({level: 'local', afterClusterTime: clusterTimeAfterPrepare},
+ successTimeout,
+ testDB,
+ collName2,
+ 1));
+
+    // dbHash does not accept a non-local read concern or afterClusterTime, and it also sets
+ // ignore_prepare=true during its execution. Therefore, dbHash should never get prepare
+ // conflicts on secondaries. dbHash acquires collection S lock for reads and it will be
+ // blocked by a prepared transaction that writes to the same collection if it is run on
+ // primaries.
+ jsTestLog("Test dbHash doesn't support afterClusterTime read.");
+ assert.commandFailedWithCode(
+ dbHash({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+
+ jsTestLog("Test dbHash doesn't support read concern other than local.");
+ assert.commandWorked(dbHash({level: 'local'}, secondaryTestDB));
+ assert.commandFailedWithCode(dbHash({level: 'available'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(dbHash({level: 'majority'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(dbHash({level: 'snapshot'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(dbHash({level: 'linearizable'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+
+ jsTestLog("Test dbHash on secondary doesn't block on a prepared transaction.");
+ assert.commandWorked(dbHash({}, secondaryTestDB));
+ jsTestLog("Test dbHash on primary blocks on collection S lock which conflicts with " +
+ "a prepared transaction.");
+ assert.commandFailedWithCode(dbHash({}, testDB, failureTimeout), ErrorCodes.MaxTimeMSExpired);
+
+    // mapReduce does not accept a non-local read concern or afterClusterTime, and it also sets
+ // ignore_prepare=true during its read phase. As mapReduce that writes is not allowed to run
+ // on secondaries, mapReduce should never get prepare conflicts on secondaries. mapReduce
+ // acquires collection S lock for reads and it will be blocked by a prepared transaction
+ // that writes to the same collection if it is run on primaries.
+ jsTestLog("Test mapReduce doesn't support afterClusterTime read.");
+ assert.commandFailedWithCode(
+ mapReduce({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+
+ jsTestLog("Test mapReduce doesn't support read concern other than local.");
+ assert.commandWorked(mapReduce({level: 'local'}, secondaryTestDB));
+ assert.commandFailedWithCode(mapReduce({level: 'available'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(mapReduce({level: 'majority'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(mapReduce({level: 'snapshot'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(mapReduce({level: 'linearizable'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+
+ jsTestLog("Test mapReduce that writes is not allowed to run on secondaries.");
+ // It currently returns ErrorCodes.PrimarySteppedDown in this case.
+ assert.commandFailedWithCode(mapReduce({}, secondaryTestDB, "outColl"),
+ [ErrorCodes.InvalidOptions, ErrorCodes.PrimarySteppedDown]);
+
+ jsTestLog("Test mapReduce on secondary doesn't block on a prepared transaction.");
+ assert.commandWorked(mapReduce({}, secondaryTestDB));
+
+ jsTestLog("Test mapReduce on primary blocks on collection S lock which conflicts with " +
+ "a prepared transaction.");
+ assert.commandFailedWithCode(mapReduce({}, testDB, {inline: 1}, failureTimeout),
+ ErrorCodes.MaxTimeMSExpired);
+
+    // validate does not accept a non-local read concern or afterClusterTime, and it also sets
+ // ignore_prepare=true during its execution. Therefore, validate should never get prepare
+ // conflicts on secondaries. validate acquires collection X lock during its execution and it
+ // will be blocked by a prepared transaction that writes to the same collection if it is run
+ // on primaries.
+ jsTestLog("Test validate doesn't support afterClusterTime read.");
+ assert.commandFailedWithCode(
+ validate({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ jsTestLog("Test validate doesn't support read concern other than local.");
+ assert.commandWorked(validate({level: 'local'}, secondaryTestDB));
+ assert.commandFailedWithCode(validate({level: 'available'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(validate({level: 'majority'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(validate({level: 'snapshot'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+ assert.commandFailedWithCode(validate({level: 'linearizable'}, secondaryTestDB),
+ ErrorCodes.InvalidOptions);
+
+ jsTestLog("Test validate on secondary doesn't block on a prepared transaction.");
+ assert.commandWorked(validate({}, secondaryTestDB));
+ jsTestLog("Test validate on primary blocks on collection X lock which conflicts with " +
+ "a prepared transaction.");
+ assert.commandFailedWithCode(validate({}, testDB, failureTimeout), ErrorCodes.MaxTimeMSExpired);
+
+ jsTestLog("Test read from an update blocks on a prepared transaction.");
+ assert.commandFailedWithCode(testDB.runCommand({
+ update: collName,
+ updates: [{q: {_id: 2}, u: {_id: 2, in_prepared_txn: false, a: 1}}],
+ maxTimeMS: failureTimeout,
+ }),
+ ErrorCodes.MaxTimeMSExpired);
+
+ // Create a second session and start a new transaction to test snapshot reads.
+ const session2 = conn.startSession({causalConsistency: false});
+ const sessionDB2 = session2.getDatabase(dbName);
+ const sessionColl2 = sessionDB2.getCollection(collName);
+ // This makes future reads in the transaction use a read timestamp after the
+ // prepareTimestamp.
+ session2.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: clusterTimeAfterPrepare}});
+
+ jsTestLog("Test read with read concern 'snapshot' and a read timestamp after " +
+ "prepareTimestamp on non-prepared documents doesn't block on a prepared " +
+ "transaction.");
+ assert.commandWorked(read({}, successTimeout, sessionDB2, collName2, 1));
+
+ jsTestLog("Test read with read concern 'snapshot' and a read timestamp after " +
+ "prepareTimestamp blocks on a prepared transaction.");
+ assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ jsTestLog("Test read with read concern 'snapshot' and atClusterTime before " +
+ "prepareTimestamp doesn't block on a prepared transaction.");
+ session2.startTransaction(
+ {readConcern: {level: "snapshot", atClusterTime: clusterTimeBeforePrepare}});
+ assert.commandWorked(read({}, successTimeout, sessionDB2, collName, 2));
+ assert.commandWorked(session2.abortTransaction_forTesting());
+
+ jsTestLog("Test read from a transaction with read concern 'majority' blocks on a prepared" +
+ " transaction.");
+ session2.startTransaction({readConcern: {level: "majority"}});
+ assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ jsTestLog("Test read from a transaction with read concern 'local' blocks on a prepared " +
+ "transaction.");
+ session2.startTransaction({readConcern: {level: "local"}});
+ assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ jsTestLog("Test read from a transaction with no read concern specified blocks on a " +
+ "prepared transaction.");
+ session2.startTransaction();
+ assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
+ ErrorCodes.MaxTimeMSExpired);
+ assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ session2.endSession();
+
+ assert.commandWorked(session.abortTransaction_forTesting());
+ session.endSession();
+}
+
+try {
+ runTest();
+} finally {
+ // Turn this failpoint off so that it doesn't impact other tests in the suite.
assert.commandWorked(testDB.adminCommand({
configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn",
+ mode: "off",
}));
+}
- function runTest() {
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- testDB.runCommand({drop: collName2, writeConcern: {w: "majority"}});
- assert.commandWorked(testDB.runCommand({create: collName2, writeConcern: {w: "majority"}}));
-
- const session = conn.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- const read = function(read_concern, timeout, db, coll, num_expected) {
- let res = db.runCommand({
- find: coll,
- filter: {in_prepared_txn: false},
- readConcern: read_concern,
- maxTimeMS: timeout,
- });
-
- if (num_expected) {
- assert(res.cursor, tojson(res));
- assert.eq(res.cursor.firstBatch.length, num_expected, tojson(res));
- }
- return res;
- };
-
- const dbHash = function(read_concern, db, timeout = successTimeout) {
- let res = db.runCommand({
- dbHash: 1,
- readConcern: read_concern,
- maxTimeMS: timeout,
- });
-
- return res;
- };
-
- const mapReduce = function(
- read_concern, db, outOptions = {inline: 1}, timeout = successTimeout) {
- let map = function() {
- emit(this.a, this.a);
- };
- let reduce = function(key, vals) {
- return 1;
- };
- let res = db.runCommand({
- mapReduce: collName,
- map: map,
- reduce: reduce,
- out: outOptions,
- readConcern: read_concern,
- maxTimeMS: timeout,
- });
- return res;
- };
-
- const validate = function(read_concern, db, timeout = successTimeout) {
- let res = db.runCommand({
- validate: collName,
- readConcern: read_concern,
- maxTimeMS: timeout,
- });
-
- return res;
- };
-
- assert.commandWorked(
- testColl.insert({_id: 1, in_prepared_txn: false}, {writeConcern: {w: "majority"}}));
- assert.commandWorked(testColl.insert({_id: 2, in_prepared_txn: false}));
- assert.commandWorked(testColl2.insert({_id: 1, in_prepared_txn: false}));
-
- session.startTransaction();
- const clusterTimeBeforePrepare =
- assert.commandWorked(sessionColl.runCommand("insert", {documents: [{_id: 3}]}))
- .operationTime;
- assert.commandWorked(sessionColl.update({_id: 2}, {_id: 2, in_prepared_txn: true}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const clusterTimeAfterPrepare =
- assert
- .commandWorked(testColl.runCommand(
- "insert",
- {documents: [{_id: 4, in_prepared_txn: false}], writeConcern: {w: "majority"}}))
- .operationTime;
-
- jsTestLog("prepareTimestamp: " + prepareTimestamp + " clusterTimeBeforePrepare: " +
- clusterTimeBeforePrepare + " clusterTimeAfterPrepare: " +
- clusterTimeAfterPrepare);
-
- assert.gt(prepareTimestamp, clusterTimeBeforePrepare);
- assert.gt(clusterTimeAfterPrepare, prepareTimestamp);
-
- jsTestLog(
- "Test read with read concern 'majority' doesn't block on a prepared transaction.");
- assert.commandWorked(read({level: 'majority'}, successTimeout, testDB, collName, 3));
-
- jsTestLog("Test read with read concern 'local' doesn't block on a prepared transaction.");
- assert.commandWorked(read({level: 'local'}, successTimeout, testDB, collName, 3));
-
- jsTestLog(
- "Test read with read concern 'available' doesn't block on a prepared transaction.");
- assert.commandWorked(read({level: 'available'}, successTimeout, testDB, collName, 3));
-
- jsTestLog("Test read with read concern 'linearizable' blocks on a prepared transaction.");
- assert.commandFailedWithCode(
- read({level: 'linearizable'}, failureTimeout, testDB, collName),
- ErrorCodes.MaxTimeMSExpired);
-
- // TODO SERVER-36953: uncomment this test
- // jsTestLog("Test afterClusterTime read before prepareTimestamp doesn't block on a " +
- // "prepared transaction.");
- // assert.commandWorked(read({level: 'local', afterClusterTime: clusterTimeBeforePrepare},
- // successTimeout,
- // testDB,
- // collName,
- // 2));
-
- jsTestLog("Test afterClusterTime read after prepareTimestamp blocks on a prepared " +
- "transaction.");
- assert.commandFailedWithCode(
- read({level: 'local', afterClusterTime: clusterTimeAfterPrepare},
- failureTimeout,
- testDB,
- collName),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Test read with afterClusterTime after prepareTimestamp on non-prepared " +
- "documents doesn't block on a prepared transaction.");
- assert.commandWorked(read({level: 'local', afterClusterTime: clusterTimeAfterPrepare},
- successTimeout,
- testDB,
- collName2,
- 1));
-
- // dbHash does not accept a non local read concern or afterClusterTime and it also sets
- // ignore_prepare=true during its execution. Therefore, dbHash should never get prepare
- // conflicts on secondaries. dbHash acquires collection S lock for reads and it will be
- // blocked by a prepared transaction that writes to the same collection if it is run on
- // primaries.
- jsTestLog("Test dbHash doesn't support afterClusterTime read.");
- assert.commandFailedWithCode(
- dbHash({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Test dbHash doesn't support read concern other than local.");
- assert.commandWorked(dbHash({level: 'local'}, secondaryTestDB));
- assert.commandFailedWithCode(dbHash({level: 'available'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(dbHash({level: 'majority'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(dbHash({level: 'snapshot'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(dbHash({level: 'linearizable'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Test dbHash on secondary doesn't block on a prepared transaction.");
- assert.commandWorked(dbHash({}, secondaryTestDB));
- jsTestLog("Test dbHash on primary blocks on collection S lock which conflicts with " +
- "a prepared transaction.");
- assert.commandFailedWithCode(dbHash({}, testDB, failureTimeout),
- ErrorCodes.MaxTimeMSExpired);
-
- // mapReduce does not accept a non local read concern or afterClusterTime and it also sets
- // ignore_prepare=true during its read phase. As mapReduce that writes is not allowed to run
- // on secondaries, mapReduce should never get prepare conflicts on secondaries. mapReduce
- // acquires collection S lock for reads and it will be blocked by a prepared transaction
- // that writes to the same collection if it is run on primaries.
- jsTestLog("Test mapReduce doesn't support afterClusterTime read.");
- assert.commandFailedWithCode(
- mapReduce({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Test mapReduce doesn't support read concern other than local.");
- assert.commandWorked(mapReduce({level: 'local'}, secondaryTestDB));
- assert.commandFailedWithCode(mapReduce({level: 'available'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(mapReduce({level: 'majority'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(mapReduce({level: 'snapshot'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(mapReduce({level: 'linearizable'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Test mapReduce that writes is not allowed to run on secondaries.");
- // It currently returns ErrorCodes.PrimarySteppedDown in this case.
- assert.commandFailedWithCode(mapReduce({}, secondaryTestDB, "outColl"),
- [ErrorCodes.InvalidOptions, ErrorCodes.PrimarySteppedDown]);
-
- jsTestLog("Test mapReduce on secondary doesn't block on a prepared transaction.");
- assert.commandWorked(mapReduce({}, secondaryTestDB));
-
- jsTestLog("Test mapReduce on primary blocks on collection S lock which conflicts with " +
- "a prepared transaction.");
- assert.commandFailedWithCode(mapReduce({}, testDB, {inline: 1}, failureTimeout),
- ErrorCodes.MaxTimeMSExpired);
-
- // validate does not accept a non local read concern or afterClusterTime and it also sets
- // ignore_prepare=true during its execution. Therefore, validate should never get prepare
- // conflicts on secondaries. validate acquires collection X lock during its execution and it
- // will be blocked by a prepared transaction that writes to the same collection if it is run
- // on primaries.
- jsTestLog("Test validate doesn't support afterClusterTime read.");
- assert.commandFailedWithCode(
- validate({level: 'local', afterClusterTime: clusterTimeAfterPrepare}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- jsTestLog("Test validate doesn't support read concern other than local.");
- assert.commandWorked(validate({level: 'local'}, secondaryTestDB));
- assert.commandFailedWithCode(validate({level: 'available'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(validate({level: 'majority'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(validate({level: 'snapshot'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
- assert.commandFailedWithCode(validate({level: 'linearizable'}, secondaryTestDB),
- ErrorCodes.InvalidOptions);
-
- jsTestLog("Test validate on secondary doesn't block on a prepared transaction.");
- assert.commandWorked(validate({}, secondaryTestDB));
- jsTestLog("Test validate on primary blocks on collection X lock which conflicts with " +
- "a prepared transaction.");
- assert.commandFailedWithCode(validate({}, testDB, failureTimeout),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Test read from an update blocks on a prepared transaction.");
- assert.commandFailedWithCode(testDB.runCommand({
- update: collName,
- updates: [{q: {_id: 2}, u: {_id: 2, in_prepared_txn: false, a: 1}}],
- maxTimeMS: failureTimeout,
- }),
- ErrorCodes.MaxTimeMSExpired);
-
- // Create a second session and start a new transaction to test snapshot reads.
- const session2 = conn.startSession({causalConsistency: false});
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
- // This makes future reads in the transaction use a read timestamp after the
- // prepareTimestamp.
- session2.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: clusterTimeAfterPrepare}});
-
- jsTestLog("Test read with read concern 'snapshot' and a read timestamp after " +
- "prepareTimestamp on non-prepared documents doesn't block on a prepared " +
- "transaction.");
- assert.commandWorked(read({}, successTimeout, sessionDB2, collName2, 1));
-
- jsTestLog("Test read with read concern 'snapshot' and a read timestamp after " +
- "prepareTimestamp blocks on a prepared transaction.");
- assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog("Test read with read concern 'snapshot' and atClusterTime before " +
- "prepareTimestamp doesn't block on a prepared transaction.");
- session2.startTransaction(
- {readConcern: {level: "snapshot", atClusterTime: clusterTimeBeforePrepare}});
- assert.commandWorked(read({}, successTimeout, sessionDB2, collName, 2));
- assert.commandWorked(session2.abortTransaction_forTesting());
-
- jsTestLog("Test read from a transaction with read concern 'majority' blocks on a prepared" +
- " transaction.");
- session2.startTransaction({readConcern: {level: "majority"}});
- assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog("Test read from a transaction with read concern 'local' blocks on a prepared " +
- "transaction.");
- session2.startTransaction({readConcern: {level: "local"}});
- assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTestLog("Test read from a transaction with no read concern specified blocks on a " +
- "prepared transaction.");
- session2.startTransaction();
- assert.commandFailedWithCode(read({}, failureTimeout, sessionDB2, collName),
- ErrorCodes.MaxTimeMSExpired);
- assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- session2.endSession();
-
- assert.commandWorked(session.abortTransaction_forTesting());
- session.endSession();
- }
-
- try {
- runTest();
- } finally {
- // Turn this failpoint off so that it doesn't impact other tests in the suite.
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "off",
- }));
- }
-
- replTest.stopSet();
-
+replTest.stopSet();
}());
diff --git a/jstests/replsets/prepare_failover_rollback_commit.js b/jstests/replsets/prepare_failover_rollback_commit.js
index 979ba52eea8..487e4620ea6 100644
--- a/jstests/replsets/prepare_failover_rollback_commit.js
+++ b/jstests/replsets/prepare_failover_rollback_commit.js
@@ -9,59 +9,59 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/rollback_test.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/replsets/libs/rollback_test.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "prepare_failover_rollback_commit";
+const dbName = "test";
+const collName = "prepare_failover_rollback_commit";
- const rollbackTest = new RollbackTest(collName);
+const rollbackTest = new RollbackTest(collName);
- let primary = rollbackTest.getPrimary();
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+let primary = rollbackTest.getPrimary();
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- // First create the collection for all.
- assert.commandWorked(testColl.insert({"a": "baseDoc"}));
+// First create the collection for all.
+assert.commandWorked(testColl.insert({"a": "baseDoc"}));
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({"b": "transactionDoc"}));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({"b": "transactionDoc"}));
- // Prepare a transaction. This will be replicated to the secondary.
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+// Prepare a transaction. This will be replicated to the secondary.
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- // Do a failover first, without rolling back any of the data from this test. We want the
- // current secondary to become primary and inherit the prepared transaction.
- rollbackTest.transitionToRollbackOperations();
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
+// Do a failover first, without rolling back any of the data from this test. We want the
+// current secondary to become primary and inherit the prepared transaction.
+rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
- // Now set up a rollback scenario for that new primary.
- rollbackTest.transitionToRollbackOperations();
+// Now set up a rollback scenario for that new primary.
+rollbackTest.transitionToRollbackOperations();
- // Create a proxy session to reuse the session state of the old primary.
- primary = rollbackTest.getPrimary();
- const newSession1 = new _DelegatingDriverSession(primary, session);
+// Create a proxy session to reuse the session state of the old primary.
+primary = rollbackTest.getPrimary();
+const newSession1 = new _DelegatingDriverSession(primary, session);
- // Commit the transaction on this primary. We expect the commit to roll back.
- assert.commandWorked(PrepareHelpers.commitTransaction(newSession1, prepareTimestamp));
+// Commit the transaction on this primary. We expect the commit to roll back.
+assert.commandWorked(PrepareHelpers.commitTransaction(newSession1, prepareTimestamp));
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
- // Create a proxy session to reuse the session state of the old primary.
- primary = rollbackTest.getPrimary();
- const newSession2 = new _DelegatingDriverSession(primary, session);
+// Create a proxy session to reuse the session state of the old primary.
+primary = rollbackTest.getPrimary();
+const newSession2 = new _DelegatingDriverSession(primary, session);
- // Commit the transaction for all to conclude the test.
- assert.commandWorked(PrepareHelpers.commitTransaction(newSession2, prepareTimestamp));
+// Commit the transaction for all to conclude the test.
+assert.commandWorked(PrepareHelpers.commitTransaction(newSession2, prepareTimestamp));
- rollbackTest.stop();
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/prepare_prepared_transaction_wc_timeout.js b/jstests/replsets/prepare_prepared_transaction_wc_timeout.js
index cbda29be3b1..536adff487a 100644
--- a/jstests/replsets/prepare_prepared_transaction_wc_timeout.js
+++ b/jstests/replsets/prepare_prepared_transaction_wc_timeout.js
@@ -5,76 +5,75 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/libs/retryable_writes_util.js");
- load("jstests/libs/write_concern_util.js");
+"use strict";
+load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/write_concern_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- const kNodes = 2;
+const kNodes = 2;
- const replTest = new ReplSetTest({nodes: kNodes});
- replTest.startSet({verbose: 1});
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: kNodes});
+replTest.startSet({verbose: 1});
+replTest.initiate();
- const priConn = replTest.getPrimary();
- const secConn = replTest.getSecondary();
+const priConn = replTest.getPrimary();
+const secConn = replTest.getSecondary();
- const lsid = UUID();
+const lsid = UUID();
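+// The same logical session id is reused by both retryability test cases below.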
- // Insert something into the user collection.
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- insert: 'user',
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(34),
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes);
+// Insert something into the user collection.
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ insert: 'user',
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(34),
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes);
- // Since we must wait for writeConcern : majority in order for the prepareTimestamp to be
- // committed, this test case will timeout when we stop replication on the secondary.
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- prepareTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(39),
- autocommit: false,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin',
- function(conn) {
- assert.commandWorked(conn.getDB('test').runCommand({
- insert: 'user',
- documents: [{_id: 50}, {_id: 70}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(39),
- readConcern: {level: 'snapshot'},
- autocommit: false,
- startTransaction: true
- }));
+// Since we must wait for writeConcern: majority in order for the prepareTimestamp to be
+// committed, this test case will time out when we stop replication on the secondary.
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ prepareTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(39),
+ autocommit: false,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin',
+ function(conn) {
+ assert.commandWorked(conn.getDB('test').runCommand({
+ insert: 'user',
+ documents: [{_id: 50}, {_id: 70}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(39),
+ readConcern: {level: 'snapshot'},
+ autocommit: false,
+ startTransaction: true
+ }));
+ });
- });
+// Explicitly abort transaction 39 because we cannot shut down the replica set when there
+// is a prepared transaction in progress.
+// TODO: SERVER-35817 Allow shutdowns when a prepared transaction is in progress.
+assert.commandWorked(priConn.getDB('admin').runCommand({
+ abortTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(39),
+ autocommit: false,
+ writeConcern: {w: 'majority'},
+}));
- // Explicitly abort transaction 39 because we cannot shut down the replica set when there
- // is a prepared transaction in progress.
- // TODO: SERVER-35817 Allow shutdowns when a prepared transaction is in progress.
- assert.commandWorked(priConn.getDB('admin').runCommand({
- abortTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(39),
- autocommit: false,
- writeConcern: {w: 'majority'},
- }));
-
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/prepare_survives_primary_reconfig_failover.js b/jstests/replsets/prepare_survives_primary_reconfig_failover.js
index 8db3322b796..b2e0be87494 100644
--- a/jstests/replsets/prepare_survives_primary_reconfig_failover.js
+++ b/jstests/replsets/prepare_survives_primary_reconfig_failover.js
@@ -5,10 +5,10 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/prepare_failover_due_to_reconfig.js");
+"use strict";
+load("jstests/replsets/libs/prepare_failover_due_to_reconfig.js");
- let testName = "prepare_survives_primary_reconfig_failover";
+let testName = "prepare_survives_primary_reconfig_failover";
- testPrepareFailoverDueToReconfig(testName, /* reconfigOnPrimary */ true);
+testPrepareFailoverDueToReconfig(testName, /* reconfigOnPrimary */ true);
})();
diff --git a/jstests/replsets/prepare_survives_reconfig_via_heartbeat_failover.js b/jstests/replsets/prepare_survives_reconfig_via_heartbeat_failover.js
index bf4a0c565a9..ddeda40b99d 100644
--- a/jstests/replsets/prepare_survives_reconfig_via_heartbeat_failover.js
+++ b/jstests/replsets/prepare_survives_reconfig_via_heartbeat_failover.js
@@ -6,10 +6,10 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/prepare_failover_due_to_reconfig.js");
+"use strict";
+load("jstests/replsets/libs/prepare_failover_due_to_reconfig.js");
- let testName = "prepare_survives_reconfig_via_heartbeat_failover";
+let testName = "prepare_survives_reconfig_via_heartbeat_failover";
- testPrepareFailoverDueToReconfig(testName, /* reconfigOnPrimary */ false);
+testPrepareFailoverDueToReconfig(testName, /* reconfigOnPrimary */ false);
})();
diff --git a/jstests/replsets/prepare_transaction_fails_on_standalone.js b/jstests/replsets/prepare_transaction_fails_on_standalone.js
index 14eb17656a5..b9ba8813722 100644
--- a/jstests/replsets/prepare_transaction_fails_on_standalone.js
+++ b/jstests/replsets/prepare_transaction_fails_on_standalone.js
@@ -4,18 +4,18 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- const standalone = MongoRunner.runMongod();
+const standalone = MongoRunner.runMongod();
- const collName = "prepare_transaction_fails_on_standalone";
- const dbName = "test";
- const testDB = standalone.getDB(dbName);
+const collName = "prepare_transaction_fails_on_standalone";
+const dbName = "test";
+const testDB = standalone.getDB(dbName);
- assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testDB.runCommand({create: collName}));
- assert.commandFailedWithCode(testDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
+assert.commandFailedWithCode(testDB.adminCommand({prepareTransaction: 1}),
+ ErrorCodes.ReadConcernMajorityNotEnabled);
- MongoRunner.stopMongod(standalone);
+MongoRunner.stopMongod(standalone);
}());
diff --git a/jstests/replsets/prepare_transaction_fails_with_arbiters.js b/jstests/replsets/prepare_transaction_fails_with_arbiters.js
index 672ef7c147a..8190eb0f019 100644
--- a/jstests/replsets/prepare_transaction_fails_with_arbiters.js
+++ b/jstests/replsets/prepare_transaction_fails_with_arbiters.js
@@ -6,36 +6,35 @@
*/
(function() {
- "use strict";
+"use strict";
- const name = "prepare_transaction_fails_with_arbiters";
- const rst = new ReplSetTest({name: name, nodes: 2});
- const nodes = rst.nodeList();
+const name = "prepare_transaction_fails_with_arbiters";
+const rst = new ReplSetTest({name: name, nodes: 2});
+const nodes = rst.nodeList();
- rst.startSet();
- rst.initiate({
- "_id": name,
- "members":
- [{"_id": 0, "host": nodes[0]}, {"_id": 1, "host": nodes[1], "arbiterOnly": true}]
- });
+rst.startSet();
+rst.initiate({
+ "_id": name,
+ "members": [{"_id": 0, "host": nodes[0]}, {"_id": 1, "host": nodes[1], "arbiterOnly": true}]
+});
- const dbName = "test";
- const collName = name;
+const dbName = "test";
+const collName = name;
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 42}));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 42}));
- assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
+assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
+ ErrorCodes.ReadConcernMajorityNotEnabled);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/prepare_transaction_fails_without_majority_reads.js b/jstests/replsets/prepare_transaction_fails_without_majority_reads.js
index 30cbeac87ee..f13c4f141e4 100644
--- a/jstests/replsets/prepare_transaction_fails_without_majority_reads.js
+++ b/jstests/replsets/prepare_transaction_fails_without_majority_reads.js
@@ -6,29 +6,29 @@
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({nodes: 1, nodeOptions: {enableMajorityReadConcern: "false"}});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({nodes: 1, nodeOptions: {enableMajorityReadConcern: "false"}});
+rst.startSet();
+rst.initiate();
- const dbName = "test";
- const collName = "prepare_transaction_fails_without_majority_reads";
+const dbName = "test";
+const collName = "prepare_transaction_fails_without_majority_reads";
- const primary = rst.getPrimary();
- const testDB = primary.getDB(dbName);
+const primary = rst.getPrimary();
+const testDB = primary.getDB(dbName);
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 42}));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 42}));
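+// With enableMajorityReadConcern: "false", prepareTransaction must be rejected.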
- assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
- ErrorCodes.ReadConcernMajorityNotEnabled);
+assert.commandFailedWithCode(sessionDB.adminCommand({prepareTransaction: 1}),
+ ErrorCodes.ReadConcernMajorityNotEnabled);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/prepare_transaction_index_build.js b/jstests/replsets/prepare_transaction_index_build.js
index 5b9c93d02a8..aa5d53673e1 100644
--- a/jstests/replsets/prepare_transaction_index_build.js
+++ b/jstests/replsets/prepare_transaction_index_build.js
@@ -8,80 +8,76 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
-
- const dbName = "test";
- const collName = "prepared_transactions_index_build";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- const bulk = testColl.initializeUnorderedBulkOp();
- for (let i = 0; i < 10; ++i) {
- bulk.insert({x: i});
- }
- assert.writeOK(bulk.execute());
-
- // activate failpoint to hang index build on secondary.
- secondary.getDB("admin").runCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'});
-
- jsTestLog("Starting a background index build.");
- assert.commandWorked(testDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {x: 1}, name: 'x_1'}],
- writeConcern: {w: 2},
- }));
-
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- jsTestLog("Starting a transaction that should involve the index and putting it into prepare");
-
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({x: 1000}));
-
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
- jsTestLog("Prepared a transaction at " + prepareTimestamp);
-
- jsTestLog("Unblocking index build.");
-
- // finish the index build
- secondary.getDB("admin").runCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'});
-
- // It's illegal to commit a prepared transaction before its prepare oplog entry has been
- // majority committed. So wait for prepare oplog entry to be majority committed before issuing
- // the commitTransaction command. We know the index build is also done if the prepare has
- // finished on the secondary.
- jsTestLog(
- "Waiting for prepare oplog entry to be majority committed and all index builds to finish on all nodes.");
- PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
-
- jsTestLog("Committing txn");
- // Commit the transaction.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- replTest.awaitReplication();
-
- jsTestLog("Testing index integrity");
- // Index should work.
- assert.eq(1000,
- secondary.getDB(dbName)
- .getCollection(collName)
- .find({x: 1000})
- .hint({x: 1})
- .toArray()[0]
- .x);
- jsTestLog("Shutting down the set");
- replTest.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+
+const dbName = "test";
+const collName = "prepared_transactions_index_build";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+const bulk = testColl.initializeUnorderedBulkOp();
+for (let i = 0; i < 10; ++i) {
+ bulk.insert({x: i});
+}
+assert.writeOK(bulk.execute());
+
+// Activate the failpoint to hang the index build on the secondary.
+secondary.getDB("admin").runCommand(
+ {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'});
+
+jsTestLog("Starting a background index build.");
+assert.commandWorked(testDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {x: 1}, name: 'x_1'}],
+ writeConcern: {w: 2},
+}));
+
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+jsTestLog("Starting a transaction that should involve the index and putting it into prepare");
+
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({x: 1000}));
+
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+jsTestLog("Prepared a transaction at " + prepareTimestamp);
+
+jsTestLog("Unblocking index build.");
+
+// Finish the index build.
+secondary.getDB("admin").runCommand(
+ {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'});
+
+// It's illegal to commit a prepared transaction before its prepare oplog entry has been
+// majority committed. So wait for prepare oplog entry to be majority committed before issuing
+// the commitTransaction command. We know the index build is also done if the prepare has
+// finished on the secondary.
+jsTestLog(
+ "Waiting for prepare oplog entry to be majority committed and all index builds to finish on all nodes.");
+PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
+
+jsTestLog("Committing txn");
+// Commit the transaction.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+replTest.awaitReplication();
+
+jsTestLog("Testing index integrity");
+// Index should work.
+assert.eq(
+ 1000,
+ secondary.getDB(dbName).getCollection(collName).find({x: 1000}).hint({x: 1}).toArray()[0].x);
+jsTestLog("Shutting down the set");
+replTest.stopSet();
}());
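
A condensed sketch of the prepare-then-commit sequence the test above exercises, assuming only the helpers it already loads (ReplSetTest, PrepareHelpers); collection and variable names are illustrative:

(function() {
"use strict";
load("jstests/core/txns/libs/prepare_helpers.js");
const rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();
const session = rst.getPrimary().startSession({causalConsistency: false});
const sessionDB = session.getDatabase("test");
assert.commandWorked(sessionDB.runCommand({create: "coll", writeConcern: {w: "majority"}}));
session.startTransaction();
assert.commandWorked(sessionDB.coll.insert({x: 1}));
// Prepare returns the prepare timestamp; committing is only legal once the
// prepare oplog entry is majority committed.
const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
PrepareHelpers.awaitMajorityCommitted(rst, prepareTimestamp);
assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
rst.stopSet();
})();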
diff --git a/jstests/replsets/prepare_transaction_read_at_cluster_time.js b/jstests/replsets/prepare_transaction_read_at_cluster_time.js
index 8b87ce293d1..53cdbe31c6c 100644
--- a/jstests/replsets/prepare_transaction_read_at_cluster_time.js
+++ b/jstests/replsets/prepare_transaction_read_at_cluster_time.js
@@ -7,154 +7,153 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/parallelTester.js");
-
- const runDBHashFn = (host, dbName, clusterTime) => {
- const conn = new Mongo(host);
- const db = conn.getDB(dbName);
-
- conn.setSlaveOk();
- let firstHash = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: eval(clusterTime),
- }));
-
- // This code will execute once the prepared transaction is committed as the call above will
- // be blocked until an abort or commit happens. Ensure that running dbHash here yields the
- // same result as above.
- let secondHash = assert.commandWorked(db.runCommand({dbHash: 1}));
-
- assert.eq(firstHash.collections, secondHash.collections);
- assert.eq(firstHash.md5, secondHash.md5);
-
- return firstHash;
- };
-
- const runFindFn = (host, dbName, collName, clusterTime) => {
- const conn = new Mongo(host);
- const db = conn.getDB(dbName);
-
- conn.setSlaveOk();
- assert.commandWorked(db.getSiblingDB(dbName).runCommand({
- find: collName,
- $_internalReadAtClusterTime: eval(clusterTime),
- }));
- };
-
- const assertOpHasPrepareConflict = (db, commandName) => {
- assert.soon(
- () => {
- const ops = db.currentOp({
- "command.$_internalReadAtClusterTime": {$exists: true},
- ["command." + commandName]: {$exists: true},
- }).inprog;
-
- if (ops.length === 1) {
- return ops[0].prepareReadConflicts > 0;
- }
-
- return false;
- },
- () =>
- `Failed to find '${commandName}' command in the ${db.getMongo().host} currentOp()` +
- ` output: ${tojson(db.currentOp())}`);
- };
-
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
-
- const replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- rst.initiate(replSetConfig);
-
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
-
- const dbName = "prepare_transaction_read_at_cluster_time";
- const collName = "testColl";
-
- const testDB = primary.getDB(dbName);
- const testDBSecondary = secondary.getDB(dbName);
-
- testDB.createCollection(collName);
- assert.commandWorked(testDB.getCollection(collName).insert({x: 0}));
-
- const session = primary.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB[collName];
-
- // Perform a write inside of a prepared transaction.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({x: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
-    // Perform a write outside of a prepared transaction. We wait for the write to replicate to
-    // the secondary because we're going to read from it at the returned operationTime.
- assert.commandWorked(testDB.getCollection(collName).insert({x: 2}, {writeConcern: {w: 2}}));
-
- // It should be possible to specify '$_internalReadAtClusterTime' as the timestamp of the
- // second write without an error for dbHash and find.
- let clusterTime = testDB.getSession().getOperationTime();
-
-    // Run dbHash and find while the prepared transaction has not committed or aborted yet.
- // These should block until the prepared transaction commits or aborts if we specify
- // $_internalReadAtClusterTime to be the timestamp of the second write we did, outside of the
- // transaction.
- const dbHashPrimaryThread =
- new ScopedThread(runDBHashFn, primary.host, dbName, tojson(clusterTime));
- const dbHashSecondaryThread =
- new ScopedThread(runDBHashFn, secondary.host, dbName, tojson(clusterTime));
-
- dbHashPrimaryThread.start();
- dbHashSecondaryThread.start();
-
- assertOpHasPrepareConflict(testDB, "dbHash");
- assertOpHasPrepareConflict(testDBSecondary, "dbHash");
-
- // Run 'find' with '$_internalReadAtClusterTime' specified.
- const findPrimaryThread =
- new ScopedThread(runFindFn, primary.host, dbName, collName, tojson(clusterTime));
- const findSecondaryThread =
- new ScopedThread(runFindFn, secondary.host, dbName, collName, tojson(clusterTime));
-
- findPrimaryThread.start();
- findSecondaryThread.start();
-
- assertOpHasPrepareConflict(testDB, "find");
- assertOpHasPrepareConflict(testDBSecondary, "find");
-
- // Run a series of DDL operations which shouldn't block before committing the prepared
- // transaction.
- const otherDbName = "prepare_transaction_read_at_cluster_time_secondary_other";
- const otherTestDB = primary.getDB(otherDbName);
-
- assert.commandWorked(otherTestDB.runCommand({create: collName, writeConcern: {w: 2}}));
- assert.commandWorked(
- otherTestDB.runCommand({collMod: collName, validator: {v: 1}, writeConcern: {w: 2}}));
- assert.commandWorked(otherTestDB.runCommand(
- {createIndexes: collName, indexes: [{key: {x: 1}, name: 'x_1'}], writeConcern: {w: 2}}));
- assert.commandWorked(
- otherTestDB.runCommand({dropIndexes: collName, index: 'x_1', writeConcern: {w: 2}}));
-
- // Committing or aborting the transaction should unblock the parallel tasks.
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- session.endSession();
-
- dbHashPrimaryThread.join();
- dbHashSecondaryThread.join();
-
- // Ensure the dbHashes across the replica set match.
- const primaryDBHash = dbHashPrimaryThread.returnData();
- const secondaryDBHash = dbHashSecondaryThread.returnData();
-
- assert.eq(primaryDBHash.collections, secondaryDBHash.collections);
- assert.eq(primaryDBHash.md5, secondaryDBHash.md5);
-
- findPrimaryThread.join();
- findSecondaryThread.join();
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/parallelTester.js");
+
+const runDBHashFn = (host, dbName, clusterTime) => {
+ const conn = new Mongo(host);
+ const db = conn.getDB(dbName);
+
+ conn.setSlaveOk();
+ let firstHash = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: eval(clusterTime),
+ }));
+
+ // This code will execute once the prepared transaction is committed as the call above will
+ // be blocked until an abort or commit happens. Ensure that running dbHash here yields the
+ // same result as above.
+ let secondHash = assert.commandWorked(db.runCommand({dbHash: 1}));
+
+ assert.eq(firstHash.collections, secondHash.collections);
+ assert.eq(firstHash.md5, secondHash.md5);
+
+ return firstHash;
+};
+
+const runFindFn = (host, dbName, collName, clusterTime) => {
+ const conn = new Mongo(host);
+ const db = conn.getDB(dbName);
+
+ conn.setSlaveOk();
+ assert.commandWorked(db.getSiblingDB(dbName).runCommand({
+ find: collName,
+ $_internalReadAtClusterTime: eval(clusterTime),
+ }));
+};
+
+const assertOpHasPrepareConflict = (db, commandName) => {
+ assert.soon(
+ () => {
+ const ops = db.currentOp({
+ "command.$_internalReadAtClusterTime": {$exists: true},
+ ["command." + commandName]: {$exists: true},
+ }).inprog;
+
+ if (ops.length === 1) {
+ return ops[0].prepareReadConflicts > 0;
+ }
+
+ return false;
+ },
+ () => `Failed to find '${commandName}' command in the ${db.getMongo().host} currentOp()` +
+ ` output: ${tojson(db.currentOp())}`);
+};
+
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+
+const replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+rst.initiate(replSetConfig);
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+
+const dbName = "prepare_transaction_read_at_cluster_time";
+const collName = "testColl";
+
+const testDB = primary.getDB(dbName);
+const testDBSecondary = secondary.getDB(dbName);
+
+testDB.createCollection(collName);
+assert.commandWorked(testDB.getCollection(collName).insert({x: 0}));
+
+const session = primary.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB[collName];
+
+// Perform a write inside of a prepared transaction.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({x: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+// Perform a write outside of a prepared transaction. We wait for the write to replicate to the
+// secondary because we're going to read from it at the returned operationTime.
+assert.commandWorked(testDB.getCollection(collName).insert({x: 2}, {writeConcern: {w: 2}}));
+
+// It should be possible to specify '$_internalReadAtClusterTime' as the timestamp of the
+// second write without an error for dbHash and find.
+let clusterTime = testDB.getSession().getOperationTime();
+
+// Run dbHash and find while the prepared transaction has not committed or aborted yet.
+// These should block until the prepared transaction commits or aborts if we specify
+// $_internalReadAtClusterTime to be the timestamp of the second write we did, outside of the
+// transaction.
+const dbHashPrimaryThread =
+ new ScopedThread(runDBHashFn, primary.host, dbName, tojson(clusterTime));
+const dbHashSecondaryThread =
+ new ScopedThread(runDBHashFn, secondary.host, dbName, tojson(clusterTime));
+
+dbHashPrimaryThread.start();
+dbHashSecondaryThread.start();
+
+assertOpHasPrepareConflict(testDB, "dbHash");
+assertOpHasPrepareConflict(testDBSecondary, "dbHash");
+
+// Run 'find' with '$_internalReadAtClusterTime' specified.
+const findPrimaryThread =
+ new ScopedThread(runFindFn, primary.host, dbName, collName, tojson(clusterTime));
+const findSecondaryThread =
+ new ScopedThread(runFindFn, secondary.host, dbName, collName, tojson(clusterTime));
+
+findPrimaryThread.start();
+findSecondaryThread.start();
+
+assertOpHasPrepareConflict(testDB, "find");
+assertOpHasPrepareConflict(testDBSecondary, "find");
+
+// Run a series of DDL operations which shouldn't block before committing the prepared
+// transaction.
+const otherDbName = "prepare_transaction_read_at_cluster_time_secondary_other";
+const otherTestDB = primary.getDB(otherDbName);
+
+assert.commandWorked(otherTestDB.runCommand({create: collName, writeConcern: {w: 2}}));
+assert.commandWorked(
+ otherTestDB.runCommand({collMod: collName, validator: {v: 1}, writeConcern: {w: 2}}));
+assert.commandWorked(otherTestDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {x: 1}, name: 'x_1'}], writeConcern: {w: 2}}));
+assert.commandWorked(
+ otherTestDB.runCommand({dropIndexes: collName, index: 'x_1', writeConcern: {w: 2}}));
+
+// Committing or aborting the transaction should unblock the parallel tasks.
+PrepareHelpers.commitTransaction(session, prepareTimestamp);
+session.endSession();
+
+dbHashPrimaryThread.join();
+dbHashSecondaryThread.join();
+
+// Ensure the dbHashes across the replica set match.
+const primaryDBHash = dbHashPrimaryThread.returnData();
+const secondaryDBHash = dbHashSecondaryThread.returnData();
+
+assert.eq(primaryDBHash.collections, secondaryDBHash.collections);
+assert.eq(primaryDBHash.md5, secondaryDBHash.md5);
- rst.stopSet();
+findPrimaryThread.join();
+findSecondaryThread.join();
+
+rst.stopSet();
}());
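
A minimal sketch of a read pinned at a cluster time, using only the command shape shown above; when a prepared transaction overlaps the chosen time, such a read blocks and surfaces prepareReadConflicts in currentOp, which is what the test asserts:

(function() {
"use strict";
const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
const db = rst.getPrimary().getDB("test");
assert.commandWorked(db.coll.insert({x: 0}));
// Pin the read at the operation time of the last write.
const clusterTime = db.getSession().getOperationTime();
assert.commandWorked(db.runCommand({
    find: "coll",
    $_internalReadAtClusterTime: clusterTime,
}));
rst.stopSet();
})();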
diff --git a/jstests/replsets/prepare_transaction_survives_state_transition_to_and_from_recovering.js b/jstests/replsets/prepare_transaction_survives_state_transition_to_and_from_recovering.js
index fcab1ae7a27..fcc8fab4f1d 100644
--- a/jstests/replsets/prepare_transaction_survives_state_transition_to_and_from_recovering.js
+++ b/jstests/replsets/prepare_transaction_survives_state_transition_to_and_from_recovering.js
@@ -6,98 +6,97 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replSet = new ReplSetTest({nodes: 2});
- replSet.startSet();
- replSet.initiate();
+const replSet = new ReplSetTest({nodes: 2});
+replSet.startSet();
+replSet.initiate();
- const primary = replSet.getPrimary();
- const secondary = replSet.getSecondary();
+const primary = replSet.getPrimary();
+const secondary = replSet.getSecondary();
- const dbName = "test";
- const collName = "prepare_transaction_survives_state_transitions_to_and_from_recovering";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "prepare_transaction_survives_state_transitions_to_and_from_recovering";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
- assert.commandWorked(testColl.insert({_id: 1}));
+assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testColl.insert({_id: 1}));
- const session1 = primary.startSession({causalConsistency: false});
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
+const session1 = primary.startSession({causalConsistency: false});
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
- const session2 = primary.startSession({causalConsistency: false});
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+const session2 = primary.startSession({causalConsistency: false});
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- jsTestLog("Starting a transaction and putting it into prepare");
+jsTestLog("Starting a transaction and putting it into prepare");
- session1.startTransaction();
- assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+session1.startTransaction();
+assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
- jsTestLog("Starting a second transaction and putting it into prepare");
+jsTestLog("Starting a second transaction and putting it into prepare");
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 2}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- replSet.awaitReplication();
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 2}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+replSet.awaitReplication();
- jsTestLog("Putting secondary in maintenance mode so it will transition to RECOVERING");
+jsTestLog("Putting secondary in maintenance mode so it will transition to RECOVERING");
- assert.commandWorked(secondary.adminCommand({replSetMaintenance: 1}));
- replSet.waitForState(secondary, ReplSetTest.State.RECOVERING);
+assert.commandWorked(secondary.adminCommand({replSetMaintenance: 1}));
+replSet.waitForState(secondary, ReplSetTest.State.RECOVERING);
- jsTestLog("Commiting the second prepared transaction while a node is in the RECOVERING state");
+jsTestLog("Commiting the second prepared transaction while a node is in the RECOVERING state");
- assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
- replSet.awaitReplication();
+assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
+replSet.awaitReplication();
- jsTestLog("Taking secondary out of maintenance mode so it will transition back to SECONDARY");
+jsTestLog("Taking secondary out of maintenance mode so it will transition back to SECONDARY");
- assert.commandWorked(secondary.adminCommand({replSetMaintenance: 0}));
- replSet.waitForState(secondary, ReplSetTest.State.SECONDARY);
+assert.commandWorked(secondary.adminCommand({replSetMaintenance: 0}));
+replSet.waitForState(secondary, ReplSetTest.State.SECONDARY);
- jsTestLog("Stepping up the secondary");
+jsTestLog("Stepping up the secondary");
- replSet.stepUp(secondary);
- replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
- const newPrimary = replSet.getPrimary();
- const newPrimaryDB = newPrimary.getDB(dbName);
+replSet.stepUp(secondary);
+replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
+const newPrimary = replSet.getPrimary();
+const newPrimaryDB = newPrimary.getDB(dbName);
- // Create a proxy session to reuse the session state of the old primary.
- const newSession = new _DelegatingDriverSession(newPrimary, session1);
+// Create a proxy session to reuse the session state of the old primary.
+const newSession = new _DelegatingDriverSession(newPrimary, session1);
- jsTestLog("Make sure that the transaction is properly prepared");
+jsTestLog("Make sure that the transaction is properly prepared");
-    // Make sure that changes to the document from the first, still-prepared transaction are not
-    // visible after recovery.
- assert.eq(newPrimaryDB.getCollection(collName).find().toArray(), [{_id: 1}, {_id: 2}]);
+// Make sure that changes to the document from the first, still-prepared transaction are not
+// visible after recovery.
+assert.eq(newPrimaryDB.getCollection(collName).find().toArray(), [{_id: 1}, {_id: 2}]);
-    // Make sure that another write on the same document conflicts with the first, still-prepared
-    // transaction and times out.
- assert.commandFailedWithCode(
- newPrimaryDB.runCommand(
- {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 1}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
+// Make sure that another write on the same document conflicts with the first, still-prepared
+// transaction and times out.
+assert.commandFailedWithCode(
+ newPrimaryDB.runCommand(
+ {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 1}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
-    // Make sure that we cannot add other operations to the first transaction since it is prepared.
- assert.commandFailedWithCode(
- newSession.getDatabase(dbName).getCollection(collName).insert({_id: 3}),
- ErrorCodes.PreparedTransactionInProgress);
+// Make sure that we cannot add other operations to the first transaction since it is prepared.
+assert.commandFailedWithCode(
+ newSession.getDatabase(dbName).getCollection(collName).insert({_id: 3}),
+ ErrorCodes.PreparedTransactionInProgress);
- jsTestLog("Verify that the locks from the prepared transaction are still held");
+jsTestLog("Verify that the locks from the prepared transaction are still held");
- assert.commandFailedWithCode(newPrimaryDB.runCommand({drop: collName, maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
+assert.commandFailedWithCode(newPrimaryDB.runCommand({drop: collName, maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
- jsTestLog("Committing transaction");
+jsTestLog("Committing transaction");
- assert.commandWorked(PrepareHelpers.commitTransaction(newSession, prepareTimestamp1));
- replSet.awaitReplication();
-
- replSet.stopSet();
+assert.commandWorked(PrepareHelpers.commitTransaction(newSession, prepareTimestamp1));
+replSet.awaitReplication();
+replSet.stopSet();
}());
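
For reference, a self-contained sketch of the maintenance-mode transition driven above; a node in RECOVERING keeps replicating but stops serving reads:

(function() {
"use strict";
const replSet = new ReplSetTest({nodes: 2});
replSet.startSet();
replSet.initiate();
const secondary = replSet.getSecondary();
// Maintenance mode forces SECONDARY -> RECOVERING; clearing it transitions back.
assert.commandWorked(secondary.adminCommand({replSetMaintenance: 1}));
replSet.waitForState(secondary, ReplSetTest.State.RECOVERING);
assert.commandWorked(secondary.adminCommand({replSetMaintenance: 0}));
replSet.waitForState(secondary, ReplSetTest.State.SECONDARY);
replSet.stopSet();
})();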
diff --git a/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js b/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js
index be5f67f2e2d..101c12252a0 100644
--- a/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js
+++ b/jstests/replsets/prepared_transaction_commands_fail_on_secondaries.js
@@ -6,74 +6,74 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const name = "prepared_transaction_commands_fail_on_secondaries";
- const rst = new ReplSetTest({
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- ],
- });
- const nodes = rst.startSet();
- rst.initiate();
+const name = "prepared_transaction_commands_fail_on_secondaries";
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ ],
+});
+const nodes = rst.startSet();
+rst.initiate();
- const dbName = "test";
- const collName = name;
+const dbName = "test";
+const collName = name;
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const testDB = primary.getDB(dbName);
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const testDB = primary.getDB(dbName);
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const priSession = primary.startSession({causalConsistency: false});
- const priSessionDB = priSession.getDatabase(dbName);
- const priSessionColl = priSessionDB.getCollection(collName);
+const priSession = primary.startSession({causalConsistency: false});
+const priSessionDB = priSession.getDatabase(dbName);
+const priSessionColl = priSessionDB.getCollection(collName);
- const secSession = PrepareHelpers.createSessionWithGivenId(
- secondary, priSession.getSessionId(), {causalConsistency: false});
+const secSession = PrepareHelpers.createSessionWithGivenId(
+ secondary, priSession.getSessionId(), {causalConsistency: false});
- priSession.startTransaction();
- const doc = {_id: 1};
- assert.commandWorked(priSessionColl.insert(doc));
- rst.awaitReplication();
+priSession.startTransaction();
+const doc = {
+ _id: 1
+};
+assert.commandWorked(priSessionColl.insert(doc));
+rst.awaitReplication();
- jsTestLog("Test that prepare fails on a secondary");
- const txnNumber = NumberLong(priSession.getTxnNumber_forTesting());
- assert.commandFailedWithCode(
- secSession.getDatabase('admin').adminCommand(
- {prepareTransaction: 1, txnNumber: txnNumber, autocommit: false}),
- ErrorCodes.NotMaster);
+jsTestLog("Test that prepare fails on a secondary");
+const txnNumber = NumberLong(priSession.getTxnNumber_forTesting());
+assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand(
+ {prepareTransaction: 1, txnNumber: txnNumber, autocommit: false}),
+ ErrorCodes.NotMaster);
- const prepareTimestamp = PrepareHelpers.prepareTransaction(priSession);
- rst.awaitReplication();
+const prepareTimestamp = PrepareHelpers.prepareTransaction(priSession);
+rst.awaitReplication();
- jsTestLog("Test that prepared commit fails on a secondary");
- // Add 1 to the increment so that the commitTimestamp is "after" the prepareTimestamp.
- const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
- assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand({
- commitTransaction: 1,
- commitTimestamp: commitTimestamp,
- txnNumber: txnNumber,
- autocommit: false
- }),
- ErrorCodes.NotMaster);
+jsTestLog("Test that prepared commit fails on a secondary");
+// Add 1 to the increment so that the commitTimestamp is "after" the prepareTimestamp.
+const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
+assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: commitTimestamp,
+ txnNumber: txnNumber,
+ autocommit: false
+}),
+ ErrorCodes.NotMaster);
- jsTestLog("Test that prepared abort fails on a secondary");
- assert.commandFailedWithCode(
- secSession.getDatabase('admin').adminCommand(
- {abortTransaction: 1, txnNumber: txnNumber, autocommit: false}),
- ErrorCodes.NotMaster);
+jsTestLog("Test that prepared abort fails on a secondary");
+assert.commandFailedWithCode(secSession.getDatabase('admin').adminCommand(
+ {abortTransaction: 1, txnNumber: txnNumber, autocommit: false}),
+ ErrorCodes.NotMaster);
- jsTestLog("Test that we can still commit the transaction");
- assert.commandWorked(PrepareHelpers.commitTransaction(priSession, commitTimestamp));
- rst.awaitReplication();
- assert.docEq(doc, testDB[collName].findOne());
- assert.eq(1, testDB[collName].find().itcount());
- assert.docEq(doc, secondary.getDB(dbName)[collName].findOne());
- assert.eq(1, secondary.getDB(dbName)[collName].find().itcount());
+jsTestLog("Test that we can still commit the transaction");
+assert.commandWorked(PrepareHelpers.commitTransaction(priSession, commitTimestamp));
+rst.awaitReplication();
+assert.docEq(doc, testDB[collName].findOne());
+assert.eq(1, testDB[collName].find().itcount());
+assert.docEq(doc, secondary.getDB(dbName)[collName].findOne());
+assert.eq(1, secondary.getDB(dbName)[collName].find().itcount());
- rst.stopSet();
+rst.stopSet();
})();
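
The commitTimestamp arithmetic above generalizes as follows; the literal prepare timestamp is an assumed example value:

// Derive a commit timestamp strictly after the prepare timestamp by bumping the
// increment, exactly as the test above does.
const prepareTs = Timestamp(1564180800, 3);  // assumed example value
const commitTs = Timestamp(prepareTs.getTime(), prepareTs.getInc() + 1);
assert(commitTs.getTime() > prepareTs.getTime() ||
       (commitTs.getTime() === prepareTs.getTime() && commitTs.getInc() > prepareTs.getInc()));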
diff --git a/jstests/replsets/prepared_transaction_on_failover.js b/jstests/replsets/prepared_transaction_on_failover.js
index e5d6bf84a1e..43689b95c1e 100644
--- a/jstests/replsets/prepared_transaction_on_failover.js
+++ b/jstests/replsets/prepared_transaction_on_failover.js
@@ -4,131 +4,128 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/rslib.js"); // For reconnect()
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- const dbName = jsTest.name();
- const collName = "coll";
- const otherDbName = dbName + "_other";
-
- function testTransactionsWithFailover(doWork, stepDown, postCommit) {
- const primary = replTest.getPrimary();
- const newPrimary = replTest.getSecondary();
- const testDB = primary.getDB(dbName);
-
- testDB.dropDatabase();
- testDB.getSiblingDB(otherDbName).dropDatabase();
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
-
- jsTestLog("Starting transaction");
- const session = primary.startSession({causalConsistency: false});
- session.startTransaction({writeConcern: {w: "majority"}});
-
- doWork(primary, session);
-
- jsTestLog("Putting transaction into prepare");
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- replTest.awaitReplication();
-
- stepDown();
- reconnect(primary);
-
- jsTestLog("Waiting for the other node to run for election and become primary");
- assert.eq(replTest.getPrimary(), newPrimary);
-
- jsTestLog("Creating an unrelated collection");
-    // Application of an unrelated DDL command needs a strong lock on the secondary. Make sure
-    // the prepared transactions have yielded their locks on the secondary.
- assert.commandWorked(newPrimary.getDB(otherDbName).runCommand({create: collName}));
- replTest.awaitReplication();
-
- jsTestLog("Dropping the collection in use cannot acquire the lock");
- assert.commandFailedWithCode(
- newPrimary.getDB(testDB).runCommand({drop: collName, maxTimeMS: 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Committing transaction on the new primary");
- // Create a proxy session to reuse the session state of the old primary.
- const newSession = new _DelegatingDriverSession(newPrimary, session);
-
- assert.commandWorked(PrepareHelpers.commitTransaction(newSession, prepareTimestamp));
- replTest.awaitReplication();
-
- postCommit(primary, newPrimary);
-
- jsTestLog("Running another transaction on the new primary");
- const secondSession = newPrimary.startSession({causalConsistency: false});
- secondSession.startTransaction({writeConcern: {w: "majority"}});
- assert.commandWorked(
- secondSession.getDatabase(dbName).getCollection(collName).insert({_id: "second-doc"}));
- assert.commandWorked(secondSession.commitTransaction_forTesting());
-
- // Unfreeze the original primary so that it can stand for election again for the next test.
- assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
- }
-
- function doInsert(primary, session) {
- const doc = {_id: "txn on primary " + primary};
- jsTestLog("Inserting a document in a transaction.");
- assert.commandWorked(session.getDatabase(dbName).getCollection(collName).insert(doc));
- }
- function postInsert(primary, newPrimary) {
- const doc = {_id: "txn on primary " + primary};
- assert.docEq(doc, primary.getDB(dbName).getCollection(collName).findOne());
- assert.docEq(doc, newPrimary.getDB(dbName).getCollection(collName).findOne());
- }
-
- function doInsertTextSearch(primary, session) {
- // Create an index outside of the transaction.
- assert.commandWorked(
- primary.getDB(dbName).getCollection(collName).createIndex({text: "text"}));
-
-        // Do the following in a transaction.
- jsTestLog("Inserting a document in a transaction.");
- assert.commandWorked(
- session.getDatabase(dbName).getCollection(collName).insert({text: "text"}));
- // Text search will recursively acquire the global lock. This tests that yielding
- // recursively held locks works on step down.
- jsTestLog("Doing a text search in a transaction.");
- assert.eq(1,
- session.getDatabase(dbName)
- .getCollection(collName)
- .find({$text: {$search: "text"}})
- .itcount());
- }
- function postInsertTextSearch(primary, newPrimary) {
- assert.eq(1,
- primary.getDB(dbName)
- .getCollection(collName)
- .find({$text: {$search: "text"}})
- .itcount());
- assert.eq(1,
- newPrimary.getDB(dbName)
- .getCollection(collName)
- .find({$text: {$search: "text"}})
- .itcount());
- }
-
- function stepDownViaHeartbeat() {
- jsTestLog("Stepping down primary via heartbeat");
- replTest.stepUp(replTest.getSecondary());
- }
-
- function stepDownViaCommand() {
- jsTestLog("Stepping down primary via command");
- assert.commandWorked(replTest.getPrimary().adminCommand({replSetStepDown: 10}));
- }
-
- testTransactionsWithFailover(doInsert, stepDownViaHeartbeat, postInsert);
- testTransactionsWithFailover(doInsert, stepDownViaCommand, postInsert);
-
- testTransactionsWithFailover(doInsertTextSearch, stepDownViaHeartbeat, postInsertTextSearch);
- testTransactionsWithFailover(doInsertTextSearch, stepDownViaCommand, postInsertTextSearch);
-
- replTest.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/rslib.js"); // For reconnect()
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+const dbName = jsTest.name();
+const collName = "coll";
+const otherDbName = dbName + "_other";
+
+function testTransactionsWithFailover(doWork, stepDown, postCommit) {
+ const primary = replTest.getPrimary();
+ const newPrimary = replTest.getSecondary();
+ const testDB = primary.getDB(dbName);
+
+ testDB.dropDatabase();
+ testDB.getSiblingDB(otherDbName).dropDatabase();
+ assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+
+ jsTestLog("Starting transaction");
+ const session = primary.startSession({causalConsistency: false});
+ session.startTransaction({writeConcern: {w: "majority"}});
+
+ doWork(primary, session);
+
+ jsTestLog("Putting transaction into prepare");
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+ replTest.awaitReplication();
+
+ stepDown();
+ reconnect(primary);
+
+ jsTestLog("Waiting for the other node to run for election and become primary");
+ assert.eq(replTest.getPrimary(), newPrimary);
+
+ jsTestLog("Creating an unrelated collection");
+    // Application of an unrelated DDL command needs a strong lock on the secondary. Make sure
+    // the prepared transactions have yielded their locks on the secondary.
+ assert.commandWorked(newPrimary.getDB(otherDbName).runCommand({create: collName}));
+ replTest.awaitReplication();
+
+ jsTestLog("Dropping the collection in use cannot acquire the lock");
+ assert.commandFailedWithCode(
+ newPrimary.getDB(testDB).runCommand({drop: collName, maxTimeMS: 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+ jsTestLog("Committing transaction on the new primary");
+ // Create a proxy session to reuse the session state of the old primary.
+ const newSession = new _DelegatingDriverSession(newPrimary, session);
+
+ assert.commandWorked(PrepareHelpers.commitTransaction(newSession, prepareTimestamp));
+ replTest.awaitReplication();
+
+ postCommit(primary, newPrimary);
+
+ jsTestLog("Running another transaction on the new primary");
+ const secondSession = newPrimary.startSession({causalConsistency: false});
+ secondSession.startTransaction({writeConcern: {w: "majority"}});
+ assert.commandWorked(
+ secondSession.getDatabase(dbName).getCollection(collName).insert({_id: "second-doc"}));
+ assert.commandWorked(secondSession.commitTransaction_forTesting());
+
+ // Unfreeze the original primary so that it can stand for election again for the next test.
+ assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+}
+
+function doInsert(primary, session) {
+ const doc = {_id: "txn on primary " + primary};
+ jsTestLog("Inserting a document in a transaction.");
+ assert.commandWorked(session.getDatabase(dbName).getCollection(collName).insert(doc));
+}
+function postInsert(primary, newPrimary) {
+ const doc = {_id: "txn on primary " + primary};
+ assert.docEq(doc, primary.getDB(dbName).getCollection(collName).findOne());
+ assert.docEq(doc, newPrimary.getDB(dbName).getCollection(collName).findOne());
+}
+
+function doInsertTextSearch(primary, session) {
+ // Create an index outside of the transaction.
+ assert.commandWorked(primary.getDB(dbName).getCollection(collName).createIndex({text: "text"}));
+
+    // Do the following in a transaction.
+ jsTestLog("Inserting a document in a transaction.");
+ assert.commandWorked(
+ session.getDatabase(dbName).getCollection(collName).insert({text: "text"}));
+ // Text search will recursively acquire the global lock. This tests that yielding
+ // recursively held locks works on step down.
+ jsTestLog("Doing a text search in a transaction.");
+ assert.eq(1,
+ session.getDatabase(dbName)
+ .getCollection(collName)
+ .find({$text: {$search: "text"}})
+ .itcount());
+}
+function postInsertTextSearch(primary, newPrimary) {
+ assert.eq(
+ 1,
+ primary.getDB(dbName).getCollection(collName).find({$text: {$search: "text"}}).itcount());
+ assert.eq(1,
+ newPrimary.getDB(dbName)
+ .getCollection(collName)
+ .find({$text: {$search: "text"}})
+ .itcount());
+}
+
+function stepDownViaHeartbeat() {
+ jsTestLog("Stepping down primary via heartbeat");
+ replTest.stepUp(replTest.getSecondary());
+}
+
+function stepDownViaCommand() {
+ jsTestLog("Stepping down primary via command");
+ assert.commandWorked(replTest.getPrimary().adminCommand({replSetStepDown: 10}));
+}
+
+testTransactionsWithFailover(doInsert, stepDownViaHeartbeat, postInsert);
+testTransactionsWithFailover(doInsert, stepDownViaCommand, postInsert);
+
+testTransactionsWithFailover(doInsertTextSearch, stepDownViaHeartbeat, postInsertTextSearch);
+testTransactionsWithFailover(doInsertTextSearch, stepDownViaCommand, postInsertTextSearch);
+
+replTest.stopSet();
})();
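
A condensed sketch of the proxy-session technique used above: after failover, a _DelegatingDriverSession reuses the old session's state (lsid and txnNumber) against the new primary, so a transaction prepared on the old primary can still be committed:

(function() {
"use strict";
load("jstests/core/txns/libs/prepare_helpers.js");
const replTest = new ReplSetTest({nodes: 2});
replTest.startSet();
replTest.initiate();
const oldPrimary = replTest.getPrimary();
assert.commandWorked(
    oldPrimary.getDB("test").runCommand({create: "coll", writeConcern: {w: "majority"}}));
const session = oldPrimary.startSession({causalConsistency: false});
session.startTransaction({writeConcern: {w: "majority"}});
assert.commandWorked(session.getDatabase("test").coll.insert({_id: 1}));
const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
replTest.awaitReplication();
replTest.stepUp(replTest.getSecondary());
// The proxy session carries the original transaction's identity to the new primary.
const newSession = new _DelegatingDriverSession(replTest.getPrimary(), session);
assert.commandWorked(PrepareHelpers.commitTransaction(newSession, prepareTimestamp));
replTest.stopSet();
})();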
diff --git a/jstests/replsets/primary_casts_vote_on_stepdown.js b/jstests/replsets/primary_casts_vote_on_stepdown.js
index 6271a353ea9..f07951a69c8 100644
--- a/jstests/replsets/primary_casts_vote_on_stepdown.js
+++ b/jstests/replsets/primary_casts_vote_on_stepdown.js
@@ -6,30 +6,29 @@
* successfully.
*/
(function() {
- "use strict";
+"use strict";
- let name = "primary_casts_vote_on_stepdown";
- let replTest = new ReplSetTest({name: name, nodes: 2});
+let name = "primary_casts_vote_on_stepdown";
+let replTest = new ReplSetTest({name: name, nodes: 2});
- let nodes = replTest.startSet();
- replTest.initiate();
+let nodes = replTest.startSet();
+replTest.initiate();
- // Make sure node 0 is initially primary, and then step up node 1 and make sure it is able to
-    // become primary in one election, gathering the vote of node 0, which will be forced to step
- // down in the act of granting its vote to node 1.
- jsTestLog("Make sure node 0 (" + nodes[0] + ") is primary.");
- replTest.waitForState(nodes[0], ReplSetTest.State.PRIMARY);
- let res = assert.commandWorked(nodes[0].adminCommand("replSetGetStatus"));
- let firstPrimaryTerm = res.term;
+// Make sure node 0 is initially primary, and then step up node 1 and make sure it is able to
+// become primary in one election, gathering the vote of node 0, which will be forced to step
+// down in the act of granting its vote to node 1.
+jsTestLog("Make sure node 0 (" + nodes[0] + ") is primary.");
+replTest.waitForState(nodes[0], ReplSetTest.State.PRIMARY);
+let res = assert.commandWorked(nodes[0].adminCommand("replSetGetStatus"));
+let firstPrimaryTerm = res.term;
- jsTestLog("Stepping up node 1 (" + nodes[1] + ").");
- replTest.stepUp(nodes[1]);
- replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
- // The election should have happened in a single attempt, so the term of the new primary should
- // be exactly 1 greater than the old primary.
- res = assert.commandWorked(nodes[1].adminCommand("replSetGetStatus"));
- assert.eq(firstPrimaryTerm + 1, res.term);
-
- replTest.stopSet();
+jsTestLog("Stepping up node 1 (" + nodes[1] + ").");
+replTest.stepUp(nodes[1]);
+replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
+// The election should have happened in a single attempt, so the term of the new primary should
+// be exactly 1 greater than the old primary.
+res = assert.commandWorked(nodes[1].adminCommand("replSetGetStatus"));
+assert.eq(firstPrimaryTerm + 1, res.term);
+replTest.stopSet();
})();
diff --git a/jstests/replsets/priority_takeover_cascading_priorities.js b/jstests/replsets/priority_takeover_cascading_priorities.js
index f55997aa5c2..b0493c62dfb 100644
--- a/jstests/replsets/priority_takeover_cascading_priorities.js
+++ b/jstests/replsets/priority_takeover_cascading_priorities.js
@@ -4,33 +4,33 @@
// Shut down the primary and confirm that the next highest priority node becomes primary.
// Repeat until 3 nodes are left standing.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
+'use strict';
+load('jstests/replsets/rslib.js');
- var name = 'priority_takeover_cascading_priorities';
- var replSet = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {priority: 5}},
- {rsConfig: {priority: 4}},
- {rsConfig: {priority: 3}},
- {rsConfig: {priority: 2}},
- {rsConfig: {priority: 1}},
- ]
- });
- replSet.startSet();
- replSet.initiate();
+var name = 'priority_takeover_cascading_priorities';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {priority: 5}},
+ {rsConfig: {priority: 4}},
+ {rsConfig: {priority: 3}},
+ {rsConfig: {priority: 2}},
+ {rsConfig: {priority: 1}},
+ ]
+});
+replSet.startSet();
+replSet.initiate();
- replSet.waitForState(0, ReplSetTest.State.PRIMARY);
-    // Wait until all nodes have applied the "new primary" no-op after initial sync.
- waitUntilAllNodesCaughtUp(replSet.nodes);
- replSet.stop(0);
+replSet.waitForState(0, ReplSetTest.State.PRIMARY);
+// Wait until all nodes have applied the "new primary" no-op after initial sync.
+waitUntilAllNodesCaughtUp(replSet.nodes);
+replSet.stop(0);
- replSet.waitForState(1, ReplSetTest.State.PRIMARY);
- replSet.stop(1);
+replSet.waitForState(1, ReplSetTest.State.PRIMARY);
+replSet.stop(1);
- replSet.waitForState(2, ReplSetTest.State.PRIMARY);
+replSet.waitForState(2, ReplSetTest.State.PRIMARY);
- // Cannot stop any more nodes because we will not have a majority.
- replSet.stopSet();
+// Cannot stop any more nodes because we will not have a majority.
+replSet.stopSet();
})();
diff --git a/jstests/replsets/priority_takeover_one_node_higher_priority.js b/jstests/replsets/priority_takeover_one_node_higher_priority.js
index 98add7a6571..24482229c1d 100644
--- a/jstests/replsets/priority_takeover_one_node_higher_priority.js
+++ b/jstests/replsets/priority_takeover_one_node_higher_priority.js
@@ -4,50 +4,48 @@
// Step down high priority node. Wait for the lower priority electable node to become primary.
// Eventually high priority node will run a priority takeover election to become primary.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
- load('jstests/replsets/libs/election_metrics.js');
-
- var name = 'priority_takeover_one_node_higher_priority';
- var replSet = new ReplSetTest({
- name: name,
- nodes: [
- {rsConfig: {priority: 3}},
- {},
- {rsConfig: {arbiterOnly: true}},
- ]
- });
- replSet.startSet();
- replSet.initiate();
-
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replSet.getPrimary();
-
- const initialPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
-
- replSet.awaitSecondaryNodes();
- replSet.awaitReplication();
-
-    // Primary should step down long enough for an election to occur on the secondary.
- var config = assert.commandWorked(primary.adminCommand({replSetGetConfig: 1})).config;
- assert.commandWorked(primary.adminCommand({replSetStepDown: replSet.kDefaultTimeoutMS / 1000}));
-
- // Step down primary and wait for node 1 to be promoted to primary.
- replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY);
-
- // Unfreeze node 0 so it can seek election.
- assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
-
-    // Eventually node 0 will stand for election again because it has a higher priority.
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
-
- // Check that both the 'called' and 'successful' fields of the 'priorityTakeover' election
- // reason counter have been incremented in serverStatus.
- const newPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
- verifyServerStatusElectionReasonCounterChange(initialPrimaryStatus.electionMetrics,
- newPrimaryStatus.electionMetrics,
- "priorityTakeover",
- 1);
-
- replSet.stopSet();
+'use strict';
+load('jstests/replsets/rslib.js');
+load('jstests/replsets/libs/election_metrics.js');
+
+var name = 'priority_takeover_one_node_higher_priority';
+var replSet = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {priority: 3}},
+ {},
+ {rsConfig: {arbiterOnly: true}},
+ ]
+});
+replSet.startSet();
+replSet.initiate();
+
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+var primary = replSet.getPrimary();
+
+const initialPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+
+replSet.awaitSecondaryNodes();
+replSet.awaitReplication();
+
+// Primary should step down long enough for an election to occur on the secondary.
+var config = assert.commandWorked(primary.adminCommand({replSetGetConfig: 1})).config;
+assert.commandWorked(primary.adminCommand({replSetStepDown: replSet.kDefaultTimeoutMS / 1000}));
+
+// Step down primary and wait for node 1 to be promoted to primary.
+replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY);
+
+// Unfreeze node 0 so it can seek election.
+assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
+
+// Eventually node 0 will stand for election again because it has a higher priority.
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+
+// Check that both the 'called' and 'successful' fields of the 'priorityTakeover' election
+// reason counter have been incremented in serverStatus.
+const newPrimaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+verifyServerStatusElectionReasonCounterChange(
+ initialPrimaryStatus.electionMetrics, newPrimaryStatus.electionMetrics, "priorityTakeover", 1);
+
+replSet.stopSet();
})();
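
The helper above compares serverStatus election metrics before and after the takeover. A minimal look at the underlying fields, whose 'called'/'successful' structure is described in the comment above:

(function() {
"use strict";
const replSet = new ReplSetTest({nodes: 1});
replSet.startSet();
replSet.initiate();
const status = assert.commandWorked(replSet.getPrimary().adminCommand({serverStatus: 1}));
// Each election reason counter exposes 'called' and 'successful' fields.
jsTestLog(tojson(status.electionMetrics.priorityTakeover));
replSet.stopSet();
})();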
diff --git a/jstests/replsets/priority_takeover_two_nodes_equal_priority.js b/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
index 2a7a8b610df..d6cdd7efbba 100644
--- a/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
+++ b/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
@@ -8,48 +8,47 @@
load('jstests/replsets/rslib.js');
(function() {
- 'use strict';
-
- var name = 'priority_takeover_two_nodes_equal_priority';
- var replTest = new ReplSetTest(
- {name: name, nodes: [{rsConfig: {priority: 3}}, {rsConfig: {priority: 3}}, {}]});
- replTest.startSet();
- replTest.initiate();
-
- jsTestLog("Waiting for one of the high priority nodes to become PRIMARY.");
- var primary;
- var primaryIndex = -1;
- var defaultPriorityNodeIndex = 2;
- assert.soon(
- function() {
- primary = replTest.getPrimary();
- replTest.nodes.find(function(node, index, array) {
- if (primary.host == node.host) {
- primaryIndex = index;
- return true;
- }
- return false;
- });
- return primaryIndex !== defaultPriorityNodeIndex;
- },
- 'Neither of the high priority nodes was elected primary.',
- replTest.kDefaultTimeoutMS, // timeout
- 1000 // interval
- );
-
- jsTestLog("Stepping down the current primary.");
- assert.commandWorked(
- primary.adminCommand({replSetStepDown: 10 * 60, secondaryCatchUpPeriodSecs: 10 * 60}));
-
- // Make sure the primary has stepped down.
- assert.neq(primary, replTest.getPrimary());
-
- // We expect the other high priority node to eventually become primary.
- var expectedNewPrimaryIndex = (primaryIndex === 0) ? 1 : 0;
-
- jsTestLog("Waiting for the other high priority node to become PRIMARY.");
- var expectedNewPrimary = replTest.nodes[expectedNewPrimaryIndex];
- replTest.waitForState(expectedNewPrimary, ReplSetTest.State.PRIMARY);
- replTest.stopSet();
-
+'use strict';
+
+var name = 'priority_takeover_two_nodes_equal_priority';
+var replTest = new ReplSetTest(
+ {name: name, nodes: [{rsConfig: {priority: 3}}, {rsConfig: {priority: 3}}, {}]});
+replTest.startSet();
+replTest.initiate();
+
+jsTestLog("Waiting for one of the high priority nodes to become PRIMARY.");
+var primary;
+var primaryIndex = -1;
+var defaultPriorityNodeIndex = 2;
+assert.soon(
+ function() {
+ primary = replTest.getPrimary();
+ replTest.nodes.find(function(node, index, array) {
+ if (primary.host == node.host) {
+ primaryIndex = index;
+ return true;
+ }
+ return false;
+ });
+ return primaryIndex !== defaultPriorityNodeIndex;
+ },
+ 'Neither of the high priority nodes was elected primary.',
+ replTest.kDefaultTimeoutMS, // timeout
+ 1000 // interval
+);
+
+jsTestLog("Stepping down the current primary.");
+assert.commandWorked(
+ primary.adminCommand({replSetStepDown: 10 * 60, secondaryCatchUpPeriodSecs: 10 * 60}));
+
+// Make sure the primary has stepped down.
+assert.neq(primary, replTest.getPrimary());
+
+// We expect the other high priority node to eventually become primary.
+var expectedNewPrimaryIndex = (primaryIndex === 0) ? 1 : 0;
+
+jsTestLog("Waiting for the other high priority node to become PRIMARY.");
+var expectedNewPrimary = replTest.nodes[expectedNewPrimaryIndex];
+replTest.waitForState(expectedNewPrimary, ReplSetTest.State.PRIMARY);
+replTest.stopSet();
})();
diff --git a/jstests/replsets/read_after_optime.js b/jstests/replsets/read_after_optime.js
index c9369074fee..dad48acd925 100644
--- a/jstests/replsets/read_after_optime.js
+++ b/jstests/replsets/read_after_optime.js
@@ -1,83 +1,83 @@
// Test read after opTime functionality with maxTimeMS.
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
-
- var replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
- var config = replTest.getReplSetConfigFromNode();
-
- var runTest = function(testDB, primaryConn) {
- var dbName = testDB.getName();
- assert.writeOK(primaryConn.getDB(dbName).user.insert({x: 1}, {writeConcern: {w: 2}}));
-
- var localDB = primaryConn.getDB('local');
-
- var oplogTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(1).next();
- var twoKSecTS = new Timestamp(oplogTS.ts.getTime() + 2000, 0);
-
- var term = oplogTS.t;
-
- // Test timeout with maxTimeMS
- var runTimeoutTest = function() {
- assert.commandFailedWithCode(testDB.runCommand({
- find: 'user',
- filter: {x: 1},
- readConcern: {afterOpTime: {ts: twoKSecTS, t: term}},
- maxTimeMS: 5000,
- }),
- ErrorCodes.MaxTimeMSExpired);
- };
-
-        // Run the timeout test 3 times, with the 'command' debug log level increased to 2
-        // for the first and last runs. The timeout message should be logged twice.
- testDB.setLogLevel(2, 'command');
- runTimeoutTest();
- testDB.setLogLevel(0, 'command');
-
- var msg = 'Command on database ' + testDB.getName() +
- ' timed out waiting for read concern to be satisfied. Command:';
- checkLog.containsWithCount(testDB.getMongo(), msg, 1);
-
-        // The read concern timeout message should not be logged for this run.
- runTimeoutTest();
-
- testDB.setLogLevel(2, 'command');
- runTimeoutTest();
- testDB.setLogLevel(0, 'command');
-
- checkLog.containsWithCount(testDB.getMongo(), msg, 2);
-
-        // Test a read at a future afterOpTime that will eventually be satisfied.
- primaryConn.getDB(dbName).parallelShellStarted.drop();
- oplogTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(1).next();
- var insertFunc = startParallelShell('let testDB = db.getSiblingDB("' + dbName + '"); ' +
- 'sleep(3000); ' +
- 'testDB.user.insert({y: 1});',
- primaryConn.port);
-
- var twoSecTS = new Timestamp(oplogTS.ts.getTime() + 2, 0);
- var res = assert.commandWorked(testDB.runCommand({
+"use strict";
+load("jstests/libs/check_log.js");
+
+var replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+var config = replTest.getReplSetConfigFromNode();
+
+var runTest = function(testDB, primaryConn) {
+ var dbName = testDB.getName();
+ assert.writeOK(primaryConn.getDB(dbName).user.insert({x: 1}, {writeConcern: {w: 2}}));
+
+ var localDB = primaryConn.getDB('local');
+
+ var oplogTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ var twoKSecTS = new Timestamp(oplogTS.ts.getTime() + 2000, 0);
+
+ var term = oplogTS.t;
+
+ // Test timeout with maxTimeMS
+ var runTimeoutTest = function() {
+ assert.commandFailedWithCode(testDB.runCommand({
find: 'user',
- filter: {y: 1},
- readConcern: {
- afterOpTime: {ts: twoSecTS, t: term},
- },
- maxTimeMS: 90 * 1000,
- }));
-
- assert.eq(null, res.code);
- assert.eq(res.cursor.firstBatch[0].y, 1);
- insertFunc();
+ filter: {x: 1},
+ readConcern: {afterOpTime: {ts: twoKSecTS, t: term}},
+ maxTimeMS: 5000,
+ }),
+ ErrorCodes.MaxTimeMSExpired);
};
- var primary = replTest.getPrimary();
- jsTest.log("test1");
- runTest(primary.getDB('test1'), primary);
- jsTest.log("test2");
- runTest(replTest.getSecondary().getDB('test2'), primary);
-
- replTest.stopSet();
+    // Run the timeout test 3 times, with the 'command' debug log level increased to 2
+    // for the first and last runs. The timeout message should be logged twice.
+ testDB.setLogLevel(2, 'command');
+ runTimeoutTest();
+ testDB.setLogLevel(0, 'command');
+
+ var msg = 'Command on database ' + testDB.getName() +
+ ' timed out waiting for read concern to be satisfied. Command:';
+ checkLog.containsWithCount(testDB.getMongo(), msg, 1);
+
+    // The read concern timeout message should not be logged for this run.
+ runTimeoutTest();
+
+ testDB.setLogLevel(2, 'command');
+ runTimeoutTest();
+ testDB.setLogLevel(0, 'command');
+
+ checkLog.containsWithCount(testDB.getMongo(), msg, 2);
+
+    // Test a read at a future afterOpTime that will eventually be satisfied.
+ primaryConn.getDB(dbName).parallelShellStarted.drop();
+ oplogTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ var insertFunc = startParallelShell('let testDB = db.getSiblingDB("' + dbName + '"); ' +
+ 'sleep(3000); ' +
+ 'testDB.user.insert({y: 1});',
+ primaryConn.port);
+
+ var twoSecTS = new Timestamp(oplogTS.ts.getTime() + 2, 0);
+ var res = assert.commandWorked(testDB.runCommand({
+ find: 'user',
+ filter: {y: 1},
+ readConcern: {
+ afterOpTime: {ts: twoSecTS, t: term},
+ },
+ maxTimeMS: 90 * 1000,
+ }));
+
+ assert.eq(null, res.code);
+ assert.eq(res.cursor.firstBatch[0].y, 1);
+ insertFunc();
+};
+
+var primary = replTest.getPrimary();
+jsTest.log("test1");
+runTest(primary.getDB('test1'), primary);
+jsTest.log("test2");
+runTest(replTest.getSecondary().getDB('test2'), primary);
+
+replTest.stopSet();
})();
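
A minimal sketch of an afterOpTime read using the command shapes above: the read waits, bounded by maxTimeMS, until the node's applied opTime reaches the requested one:

(function() {
"use strict";
const replTest = new ReplSetTest({nodes: 2});
replTest.startSet();
replTest.initiate();
const primary = replTest.getPrimary();
assert.writeOK(primary.getDB("test").user.insert({x: 1}, {writeConcern: {w: 2}}));
const lastOp = primary.getDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
// Reading at an already-applied opTime returns immediately; a future opTime would
// block until replication catches up or maxTimeMS expires.
assert.commandWorked(primary.getDB("test").runCommand({
    find: "user",
    readConcern: {afterOpTime: {ts: lastOp.ts, t: lastOp.t}},
    maxTimeMS: 30 * 1000,
}));
replTest.stopSet();
})();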
diff --git a/jstests/replsets/read_at_cluster_time_outside_transactions.js b/jstests/replsets/read_at_cluster_time_outside_transactions.js
index af0bbf9cf4f..e75bf2656e5 100644
--- a/jstests/replsets/read_at_cluster_time_outside_transactions.js
+++ b/jstests/replsets/read_at_cluster_time_outside_transactions.js
@@ -5,145 +5,148 @@
* @tags: [requires_document_locking, uses_transactions]
*/
(function() {
- "use strict";
-
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
-
- const collName = "read_at_cluster_time_outside_transactions";
- const collection = db[collName];
-
- // We prevent the replica set from advancing oldest_timestamp. This ensures that the snapshot
- // associated with 'clusterTime' is retained for the duration of this test.
- rst.nodes.forEach(conn => {
- assert.commandWorked(conn.adminCommand({
- configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
- mode: "alwaysOn",
- }));
- });
-
- // We insert 3 documents in order to have data to return for both the find and getMore commands
- // when using a batch size of 2. We then save the md5sum associated with the opTime of the last
- // insert.
- assert.commandWorked(collection.insert({_id: 1, comment: "should be seen by find command"}));
- assert.commandWorked(collection.insert({_id: 3, comment: "should be seen by find command"}));
- assert.commandWorked(collection.insert({_id: 5, comment: "should be seen by getMore command"}));
-
- const clusterTime = db.getSession().getOperationTime();
-
- let res = assert.commandWorked(db.runCommand({dbHash: 1}));
- const hashAfterOriginalInserts = {collections: res.collections, md5: res.md5};
-
- // The documents with _id=1 and _id=3 should be returned by the find command.
- let cursor = collection.find().sort({_id: 1}).batchSize(2);
- assert.eq({_id: 1, comment: "should be seen by find command"}, cursor.next());
- assert.eq({_id: 3, comment: "should be seen by find command"}, cursor.next());
-
- // We then insert documents with _id=2 and _id=4. The document with _id=2 is positioned behind
- // the _id index cursor and won't be returned by the getMore command. However, the document with
- // _id=4 is positioned ahead and should end up being returned.
- assert.commandWorked(
- collection.insert({_id: 2, comment: "should not be seen by getMore command"}));
- assert.commandWorked(
- collection.insert({_id: 4, comment: "should be seen by non-snapshot getMore command"}));
- assert.eq({_id: 4, comment: "should be seen by non-snapshot getMore command"}, cursor.next());
- assert.eq({_id: 5, comment: "should be seen by getMore command"}, cursor.next());
- assert(!cursor.hasNext());
-
- // When using the $_internalReadAtClusterTime option with a clusterTime from after the
- // original 3 documents were inserted, the document with _id=2 shouldn't be visible to the find
- // command because it was inserted afterwards. The same applies to the document with _id=4 and
- // the getMore command.
- res = collection.runCommand("find", {
- batchSize: 2,
- sort: {_id: 1},
- $_internalReadAtClusterTime: clusterTime,
- });
-
- const batchSize = 2;
- cursor = new DBCommandCursor(db, res, batchSize);
- assert.eq({_id: 1, comment: "should be seen by find command"}, cursor.next());
- assert.eq({_id: 3, comment: "should be seen by find command"}, cursor.next());
- assert.eq({_id: 5, comment: "should be seen by getMore command"}, cursor.next());
- assert(!cursor.hasNext());
-
- // Using the $_internalReadAtClusterTime option to read at the opTime of the last of the 3
- // original inserts should return the same md5sum as it did originally.
- res = assert.commandWorked(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: clusterTime,
- }));
+"use strict";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
- const hashAtClusterTime = {collections: res.collections, md5: res.md5};
- assert.eq(hashAtClusterTime, hashAfterOriginalInserts);
-
- // Attempting to read at a null timestamp should return an error.
- assert.commandFailedWithCode(collection.runCommand("find", {
- batchSize: 2,
- sort: {_id: 1},
- $_internalReadAtClusterTime: new Timestamp(0, 0),
- }),
- ErrorCodes.InvalidOptions);
-
- assert.commandFailedWithCode(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: new Timestamp(0, 1),
- }),
- ErrorCodes.InvalidOptions);
-
- // Attempting to read at a clusterTime in the future should return an error.
- const futureClusterTime = new Timestamp(clusterTime.getTime() + 1000, 1);
-
- assert.commandFailedWithCode(collection.runCommand("find", {
- batchSize: 2,
- sort: {_id: 1},
- $_internalReadAtClusterTime: futureClusterTime,
- }),
- ErrorCodes.InvalidOptions);
-
- assert.commandFailedWithCode(db.runCommand({
- dbHash: 1,
- $_internalReadAtClusterTime: futureClusterTime,
- }),
- ErrorCodes.InvalidOptions);
-
- // $_internalReadAtClusterTime is not supported in transactions.
- const session = primary.startSession();
- const sessionDB = session.getDatabase("test");
- const sessionColl = sessionDB[collName];
-
- session.startTransaction();
- assert.commandFailedWithCode(sessionColl.runCommand("find", {
- batchSize: 2,
- sort: {_id: 1},
- $_internalReadAtClusterTime: clusterTime,
- }),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // dbHash is not supported in transactions at all.
- session.startTransaction();
- assert.commandFailedWithCode(
- sessionDB.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}),
- ErrorCodes.OperationNotSupportedInTransaction);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- // Create a new collection to move the minimum visible snapshot to that operation time. Then
- // read at a cluster time behind the minimum visible snapshot which should fail.
- let newCollName = "newColl";
- assert.commandWorked(db.createCollection(newCollName));
- let createCollClusterTime = db.getSession().getOperationTime();
- res = db[newCollName].runCommand("find", {
- $_internalReadAtClusterTime:
- Timestamp(createCollClusterTime.getTime() - 1, createCollClusterTime.getInc()),
- });
- assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);
-
- rst.stopSet();
+const collName = "read_at_cluster_time_outside_transactions";
+const collection = db[collName];
+
+// We prevent the replica set from advancing oldest_timestamp. This ensures that the snapshot
+// associated with 'clusterTime' is retained for the duration of this test.
+rst.nodes.forEach(conn => {
+ assert.commandWorked(conn.adminCommand({
+ configureFailPoint: "WTPreserveSnapshotHistoryIndefinitely",
+ mode: "alwaysOn",
+ }));
+});
+
+// We insert 3 documents in order to have data to return for both the find and getMore commands
+// when using a batch size of 2. We then save the md5sum associated with the opTime of the last
+// insert.
+assert.commandWorked(collection.insert({_id: 1, comment: "should be seen by find command"}));
+assert.commandWorked(collection.insert({_id: 3, comment: "should be seen by find command"}));
+assert.commandWorked(collection.insert({_id: 5, comment: "should be seen by getMore command"}));
+
+const clusterTime = db.getSession().getOperationTime();
+
+let res = assert.commandWorked(db.runCommand({dbHash: 1}));
+const hashAfterOriginalInserts = {
+ collections: res.collections,
+ md5: res.md5
+};
+
+// The documents with _id=1 and _id=3 should be returned by the find command.
+let cursor = collection.find().sort({_id: 1}).batchSize(2);
+assert.eq({_id: 1, comment: "should be seen by find command"}, cursor.next());
+assert.eq({_id: 3, comment: "should be seen by find command"}, cursor.next());
+
+// We then insert documents with _id=2 and _id=4. The document with _id=2 is positioned behind
+// the _id index cursor and won't be returned by the getMore command. However, the document with
+// _id=4 is positioned ahead and should end up being returned.
+assert.commandWorked(collection.insert({_id: 2, comment: "should not be seen by getMore command"}));
+assert.commandWorked(
+ collection.insert({_id: 4, comment: "should be seen by non-snapshot getMore command"}));
+assert.eq({_id: 4, comment: "should be seen by non-snapshot getMore command"}, cursor.next());
+assert.eq({_id: 5, comment: "should be seen by getMore command"}, cursor.next());
+assert(!cursor.hasNext());
+
+// When using the $_internalReadAtClusterTime option with a clusterTime from after the
+// original 3 documents were inserted, the document with _id=2 shouldn't be visible to the find
+// command because it was inserted afterwards. The same applies to the document with _id=4 and
+// the getMore command.
+res = collection.runCommand("find", {
+ batchSize: 2,
+ sort: {_id: 1},
+ $_internalReadAtClusterTime: clusterTime,
+});
+
+const batchSize = 2;
+cursor = new DBCommandCursor(db, res, batchSize);
+assert.eq({_id: 1, comment: "should be seen by find command"}, cursor.next());
+assert.eq({_id: 3, comment: "should be seen by find command"}, cursor.next());
+assert.eq({_id: 5, comment: "should be seen by getMore command"}, cursor.next());
+assert(!cursor.hasNext());
+
+// Using the $_internalReadAtClusterTime option to read at the opTime of the last of the 3
+// original inserts should return the same md5sum as it did originally.
+res = assert.commandWorked(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: clusterTime,
+}));
+
+const hashAtClusterTime = {
+ collections: res.collections,
+ md5: res.md5
+};
+assert.eq(hashAtClusterTime, hashAfterOriginalInserts);
+
+// Attempting to read at a null timestamp should return an error.
+assert.commandFailedWithCode(collection.runCommand("find", {
+ batchSize: 2,
+ sort: {_id: 1},
+ $_internalReadAtClusterTime: new Timestamp(0, 0),
+}),
+ ErrorCodes.InvalidOptions);
+
+assert.commandFailedWithCode(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: new Timestamp(0, 1),
+}),
+ ErrorCodes.InvalidOptions);
+
+// Attempting to read at a clusterTime in the future should return an error.
+const futureClusterTime = new Timestamp(clusterTime.getTime() + 1000, 1);
+
+assert.commandFailedWithCode(collection.runCommand("find", {
+ batchSize: 2,
+ sort: {_id: 1},
+ $_internalReadAtClusterTime: futureClusterTime,
+}),
+ ErrorCodes.InvalidOptions);
+
+assert.commandFailedWithCode(db.runCommand({
+ dbHash: 1,
+ $_internalReadAtClusterTime: futureClusterTime,
+}),
+ ErrorCodes.InvalidOptions);
+
+// $_internalReadAtClusterTime is not supported in transactions.
+const session = primary.startSession();
+const sessionDB = session.getDatabase("test");
+const sessionColl = sessionDB[collName];
+
+session.startTransaction();
+assert.commandFailedWithCode(sessionColl.runCommand("find", {
+ batchSize: 2,
+ sort: {_id: 1},
+ $_internalReadAtClusterTime: clusterTime,
+}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// dbHash is not supported in transactions at all.
+session.startTransaction();
+assert.commandFailedWithCode(
+ sessionDB.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}),
+ ErrorCodes.OperationNotSupportedInTransaction);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+// Create a new collection to move the minimum visible snapshot to that operation time. Then
+// read at a cluster time behind the minimum visible snapshot, which should fail.
+let newCollName = "newColl";
+assert.commandWorked(db.createCollection(newCollName));
+let createCollClusterTime = db.getSession().getOperationTime();
+res = db[newCollName].runCommand("find", {
+ $_internalReadAtClusterTime:
+ Timestamp(createCollClusterTime.getTime() - 1, createCollClusterTime.getInc()),
+});
+assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable);
+
+rst.stopSet();
})();
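Since $_internalReadAtClusterTime is an internal, test-only option, its effect is worth compressing into one sketch. This assumes the setup above (a single-node set with snapshot history pinned by the WTPreserveSnapshotHistoryIndefinitely failpoint) and reuses the db and collection handles from the test; the _id value is invented.

// Sketch: a read at a saved clusterTime excludes writes that happened later.
const t0 = db.getSession().getOperationTime();
assert.commandWorked(collection.insert({_id: 99, comment: "inserted after t0"}));

const snap = assert.commandWorked(collection.runCommand("find", {
    sort: {_id: 1},
    $_internalReadAtClusterTime: t0,
}));
// {_id: 99} is invisible at t0, even though a plain find would return it.
assert.eq(undefined, snap.cursor.firstBatch.find(d => d._id === 99));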
diff --git a/jstests/replsets/read_committed.js b/jstests/replsets/read_committed.js
index f76ea6488a1..79a9cd3b0fa 100644
--- a/jstests/replsets/read_committed.js
+++ b/jstests/replsets/read_committed.js
@@ -9,169 +9,171 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
- "use strict";
-
- const majorityWriteConcern = {writeConcern: {w: "majority", wtimeout: 60 * 1000}};
-
- // Each test case includes a 'prepareCollection' method that sets up the initial state starting
- // with an empty collection, a 'write' method that does some write, and two arrays,
- // 'expectedBefore' and 'expectedAfter' that describe the expected contents of the collection
- // before and after the write. The 'prepareCollection' and 'write' methods should leave the
- // collection either empty or with a single document with _id: 1.
- const testCases = {
- insert: {
- prepareCollection: function(coll) {}, // No-op
- write: function(coll, writeConcern) {
- assert.writeOK(coll.insert({_id: 1}, writeConcern));
- },
- expectedBefore: [],
- expectedAfter: [{_id: 1}],
+"use strict";
+
+const majorityWriteConcern = {
+ writeConcern: {w: "majority", wtimeout: 60 * 1000}
+};
+
+// Each test case includes a 'prepareCollection' method that sets up the initial state starting
+// with an empty collection, a 'write' method that does some write, and two arrays,
+// 'expectedBefore' and 'expectedAfter' that describe the expected contents of the collection
+// before and after the write. The 'prepareCollection' and 'write' methods should leave the
+// collection either empty or with a single document with _id: 1.
+const testCases = {
+ insert: {
+ prepareCollection: function(coll) {}, // No-op
+ write: function(coll, writeConcern) {
+ assert.writeOK(coll.insert({_id: 1}, writeConcern));
},
- update: {
- prepareCollection: function(coll) {
- assert.writeOK(coll.insert({_id: 1, state: 'before'}, majorityWriteConcern));
- },
- write: function(coll, writeConcern) {
- assert.writeOK(coll.update({_id: 1}, {$set: {state: 'after'}}, writeConcern));
- },
- expectedBefore: [{_id: 1, state: 'before'}],
- expectedAfter: [{_id: 1, state: 'after'}],
+ expectedBefore: [],
+ expectedAfter: [{_id: 1}],
+ },
+ update: {
+ prepareCollection: function(coll) {
+ assert.writeOK(coll.insert({_id: 1, state: 'before'}, majorityWriteConcern));
},
- remove: {
- prepareCollection: function(coll) {
- assert.writeOK(coll.insert({_id: 1}, majorityWriteConcern));
- },
- write: function(coll, writeConcern) {
- assert.writeOK(coll.remove({_id: 1}, writeConcern));
- },
- expectedBefore: [{_id: 1}],
- expectedAfter: [],
+ write: function(coll, writeConcern) {
+ assert.writeOK(coll.update({_id: 1}, {$set: {state: 'after'}}, writeConcern));
},
+ expectedBefore: [{_id: 1, state: 'before'}],
+ expectedAfter: [{_id: 1, state: 'after'}],
+ },
+ remove: {
+ prepareCollection: function(coll) {
+ assert.writeOK(coll.insert({_id: 1}, majorityWriteConcern));
+ },
+ write: function(coll, writeConcern) {
+ assert.writeOK(coll.remove({_id: 1}, writeConcern));
+ },
+ expectedBefore: [{_id: 1}],
+ expectedAfter: [],
+ },
+};
+
+// Set up a set and grab things for later.
+var name = "read_committed";
+var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
+
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ replTest.stopSet();
+ return;
+}
+
+var nodes = replTest.nodeList();
+var config = {
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+};
+
+replTest.initiate(config);
+
+// Get connections and collection.
+var primary = replTest.getPrimary();
+var secondary = replTest._slaves[0];
+var coll = primary.getDB(name)[name];
+var secondaryColl = secondary.getDB(name)[name];
+
+function log(arg) {
+ jsTest.log(tojson(arg));
+}
+
+function doRead(coll, readConcern) {
+ readConcern.maxTimeMS = 3000;
+ var res = assert.commandWorked(coll.runCommand('find', readConcern));
+ return new DBCommandCursor(coll.getDB(), res).toArray();
+}
+
+function doDirtyRead(coll) {
+ log("doing dirty read");
+ var ret = doRead(coll, {"readConcern": {"level": "local"}});
+ log("done doing dirty read.");
+ return ret;
+}
+
+function doCommittedRead(coll) {
+ log("doing committed read");
+ var ret = doRead(coll, {"readConcern": {"level": "majority"}});
+ log("done doing committed read.");
+ return ret;
+}
+
+function readLatestOplogEntry(readConcernLevel) {
+ var oplog = primary.getDB('local').oplog.rs;
+ var res = oplog.runCommand('find', {
+ "readConcern": {"level": readConcernLevel},
+ "maxTimeMS": 3000,
+ sort: {$natural: -1},
+ limit: 1,
+ });
+ assert.commandWorked(res);
+ return new DBCommandCursor(coll.getDB(), res).toArray()[0];
+}
+
+for (var testName in testCases) {
+ jsTestLog('Running test ' + testName);
+ var test = testCases[testName];
+
+ const setUpInitialState = function setUpInitialState() {
+ assert.writeOK(coll.remove({}, majorityWriteConcern));
+ test.prepareCollection(coll);
+ // Do some sanity checks.
+ assert.eq(doDirtyRead(coll), test.expectedBefore);
+ assert.eq(doCommittedRead(coll), test.expectedBefore);
};
- // Set up a set and grab things for later.
- var name = "read_committed";
- var replTest =
- new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
-
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
-
- var nodes = replTest.nodeList();
- var config = {
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], priority: 0},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- };
-
- replTest.initiate(config);
-
- // Get connections and collection.
- var primary = replTest.getPrimary();
- var secondary = replTest._slaves[0];
- var coll = primary.getDB(name)[name];
- var secondaryColl = secondary.getDB(name)[name];
-
- function log(arg) {
- jsTest.log(tojson(arg));
- }
-
- function doRead(coll, readConcern) {
- readConcern.maxTimeMS = 3000;
- var res = assert.commandWorked(coll.runCommand('find', readConcern));
- return new DBCommandCursor(coll.getDB(), res).toArray();
- }
-
- function doDirtyRead(coll) {
- log("doing dirty read");
- var ret = doRead(coll, {"readConcern": {"level": "local"}});
- log("done doing dirty read.");
- return ret;
- }
-
- function doCommittedRead(coll) {
- log("doing committed read");
- var ret = doRead(coll, {"readConcern": {"level": "majority"}});
- log("done doing committed read.");
- return ret;
- }
-
- function readLatestOplogEntry(readConcernLevel) {
- var oplog = primary.getDB('local').oplog.rs;
- var res = oplog.runCommand('find', {
- "readConcern": {"level": readConcernLevel},
- "maxTimeMS": 3000,
- sort: {$natural: -1},
- limit: 1,
+ // Writes done with majority write concern must be immediately visible to both dirty and
+ // committed reads.
+ setUpInitialState();
+ test.write(coll, majorityWriteConcern);
+ assert.eq(doDirtyRead(coll), test.expectedAfter);
+ assert.eq(doCommittedRead(coll), test.expectedAfter);
+
+ // Return to the initial state, then stop the secondary from applying new writes to prevent
+ // them from becoming committed.
+ setUpInitialState();
+ assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+ const initialOplogTs = readLatestOplogEntry('local').ts;
+
+ // Writes done without majority write concern must be immediately visible to dirty read
+ // and hidden from committed reads until they have been replicated. The rules for seeing
+ // an oplog entry for a write are the same as for the write itself.
+ test.write(coll, {});
+ assert.eq(doDirtyRead(coll), test.expectedAfter);
+ assert.neq(readLatestOplogEntry('local').ts, initialOplogTs);
+ assert.eq(doCommittedRead(coll), test.expectedBefore);
+ assert.eq(readLatestOplogEntry('majority').ts, initialOplogTs);
+
+ // Try the committed read again after sleeping to ensure it doesn't only work for
+ // queries immediately after the write.
+ sleep(1000);
+ assert.eq(doCommittedRead(coll), test.expectedBefore);
+ assert.eq(readLatestOplogEntry('majority').ts, initialOplogTs);
+
+ // Restart oplog application on the secondary and ensure the committed view is updated.
+ assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
+ coll.getDB().getLastError("majority", 60 * 1000);
+ assert.eq(doCommittedRead(coll), test.expectedAfter);
+ assert.neq(readLatestOplogEntry('majority').ts, initialOplogTs);
+
+ // The secondary will be able to make the write committed soon after the primary, but there
+ // is no way to block until it does.
+ try {
+ assert.soon(function() {
+ return friendlyEqual(doCommittedRead(secondaryColl), test.expectedAfter);
});
- assert.commandWorked(res);
- return new DBCommandCursor(coll.getDB(), res).toArray()[0];
+ } catch (e) {
+        // Generate useful error messages on failures.
+ assert.eq(doCommittedRead(secondaryColl), test.expectedAfter);
}
-
- for (var testName in testCases) {
- jsTestLog('Running test ' + testName);
- var test = testCases[testName];
-
- const setUpInitialState = function setUpInitialState() {
- assert.writeOK(coll.remove({}, majorityWriteConcern));
- test.prepareCollection(coll);
- // Do some sanity checks.
- assert.eq(doDirtyRead(coll), test.expectedBefore);
- assert.eq(doCommittedRead(coll), test.expectedBefore);
- };
-
- // Writes done with majority write concern must be immediately visible to both dirty and
- // committed reads.
- setUpInitialState();
- test.write(coll, majorityWriteConcern);
- assert.eq(doDirtyRead(coll), test.expectedAfter);
- assert.eq(doCommittedRead(coll), test.expectedAfter);
-
- // Return to the initial state, then stop the secondary from applying new writes to prevent
- // them from becoming committed.
- setUpInitialState();
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- const initialOplogTs = readLatestOplogEntry('local').ts;
-
- // Writes done without majority write concern must be immediately visible to dirty read
- // and hidden from committed reads until they have been replicated. The rules for seeing
- // an oplog entry for a write are the same as for the write itself.
- test.write(coll, {});
- assert.eq(doDirtyRead(coll), test.expectedAfter);
- assert.neq(readLatestOplogEntry('local').ts, initialOplogTs);
- assert.eq(doCommittedRead(coll), test.expectedBefore);
- assert.eq(readLatestOplogEntry('majority').ts, initialOplogTs);
-
- // Try the committed read again after sleeping to ensure it doesn't only work for
- // queries immediately after the write.
- sleep(1000);
- assert.eq(doCommittedRead(coll), test.expectedBefore);
- assert.eq(readLatestOplogEntry('majority').ts, initialOplogTs);
-
- // Restart oplog application on the secondary and ensure the committed view is updated.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
- coll.getDB().getLastError("majority", 60 * 1000);
- assert.eq(doCommittedRead(coll), test.expectedAfter);
- assert.neq(readLatestOplogEntry('majority').ts, initialOplogTs);
-
- // The secondary will be able to make the write committed soon after the primary, but there
- // is no way to block until it does.
- try {
- assert.soon(function() {
- return friendlyEqual(doCommittedRead(secondaryColl), test.expectedAfter);
- });
- } catch (e) {
- // generate useful error messages on failures.
- assert.eq(doCommittedRead(secondaryColl), test.expectedAfter);
- }
- }
- replTest.stopSet();
+}
+replTest.stopSet();
}());
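The doRead/doDirtyRead/doCommittedRead trio above boils down to one parameterized helper. A minimal sketch, assuming a majority-read-capable replica set and an existing collection handle coll; readAtLevel is an invented name:

// Sketch: the only difference between a "dirty" and a "committed" read here
// is the readConcern level attached to the find command.
function readAtLevel(coll, level) {
    var res = assert.commandWorked(coll.runCommand('find', {
        readConcern: {level: level},
        maxTimeMS: 3000,
    }));
    return new DBCommandCursor(coll.getDB(), res).toArray();
}
var dirty = readAtLevel(coll, 'local');         // may include uncommitted writes
var committed = readAtLevel(coll, 'majority');  // only majority-committed data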
diff --git a/jstests/replsets/read_committed_after_rollback.js b/jstests/replsets/read_committed_after_rollback.js
index bdb83b144a8..097c75c1564 100644
--- a/jstests/replsets/read_committed_after_rollback.js
+++ b/jstests/replsets/read_committed_after_rollback.js
@@ -8,148 +8,144 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
- "use strict";
-
- function assertCommittedReadsBlock(coll) {
- var res =
- coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 3000});
- assert.commandFailedWithCode(
- res,
- ErrorCodes.MaxTimeMSExpired,
- "Expected read of " + coll.getFullName() + ' on ' + coll.getMongo().host + " to block");
+"use strict";
+
+function assertCommittedReadsBlock(coll) {
+ var res = coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 3000});
+ assert.commandFailedWithCode(
+ res,
+ ErrorCodes.MaxTimeMSExpired,
+ "Expected read of " + coll.getFullName() + ' on ' + coll.getMongo().host + " to block");
+}
+
+function doCommittedRead(coll) {
+ var res = coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 10000});
+ assert.commandWorked(res, 'reading from ' + coll.getFullName() + ' on ' + coll.getMongo().host);
+ return new DBCommandCursor(coll.getDB(), res).toArray()[0].state;
+}
+
+function doDirtyRead(coll) {
+ var res = coll.runCommand('find', {"readConcern": {"level": "local"}});
+ assert.commandWorked(res, 'reading from ' + coll.getFullName() + ' on ' + coll.getMongo().host);
+ return new DBCommandCursor(coll.getDB(), res).toArray()[0].state;
+}
+
+// Set up a set and grab things for later.
+var name = "read_committed_after_rollback";
+var replTest = new ReplSetTest(
+ {name: name, nodes: 5, useBridge: true, nodeOptions: {enableMajorityReadConcern: ''}});
+
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ replTest.stopSet();
+ return;
+}
+
+var nodes = replTest.nodeList();
+var config = {
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], priority: 0},
+ // Note: using two arbiters to ensure that a host that can't talk to any other
+ // data-bearing node can still be elected. This also means that a write isn't considered
+ // committed until it is on all 3 data-bearing nodes, not just 2.
+ {"_id": 3, "host": nodes[3], arbiterOnly: true},
+ {"_id": 4, "host": nodes[4], arbiterOnly: true},
+ ]
+};
+
+replTest.initiate(config);
+
+// Get connections.
+var oldPrimary = replTest.getPrimary();
+var newPrimary = replTest._slaves[0];
+var pureSecondary = replTest._slaves[1];
+var arbiters = [replTest.nodes[3], replTest.nodes[4]];
+
+// This is the collection that all of the tests will use.
+var collName = name + '.collection';
+var oldPrimaryColl = oldPrimary.getCollection(collName);
+var newPrimaryColl = newPrimary.getCollection(collName);
+
+// Set up initial state.
+assert.writeOK(oldPrimaryColl.insert({_id: 1, state: 'old'},
+ {writeConcern: {w: 'majority', wtimeout: 30000}}));
+assert.eq(doDirtyRead(oldPrimaryColl), 'old');
+assert.eq(doCommittedRead(oldPrimaryColl), 'old');
+assert.eq(doDirtyRead(newPrimaryColl), 'old');
+// Note that we can't necessarily do a committed read from newPrimaryColl and get 'old', since
+// delivery of the commit level to secondaries isn't synchronized with anything
+// (we would have to hammer to reliably prove that it eventually would work).
+
+// Partition the world such that oldPrimary is still primary but can't replicate to anyone.
+// newPrimary is disconnected from the arbiters first to ensure that it can't be elected.
+newPrimary.disconnect(arbiters);
+oldPrimary.disconnect([newPrimary, pureSecondary]);
+assert.eq(doDirtyRead(newPrimaryColl), 'old');
+
+// This write will only make it to oldPrimary and will never become committed.
+assert.writeOK(oldPrimaryColl.save({_id: 1, state: 'INVALID'}));
+assert.eq(doDirtyRead(oldPrimaryColl), 'INVALID');
+assert.eq(doCommittedRead(oldPrimaryColl), 'old');
+
+// Change the partitioning so that oldPrimary is isolated, and newPrimary can be elected.
+oldPrimary.setSlaveOk();
+oldPrimary.disconnect(arbiters);
+newPrimary.reconnect(arbiters);
+assert.soon(() => newPrimary.adminCommand('isMaster').ismaster, '', 60 * 1000);
+assert.soon(function() {
+ try {
+ return !oldPrimary.adminCommand('isMaster').ismaster;
+ } catch (e) {
+ return false; // ignore disconnect errors.
}
-
- function doCommittedRead(coll) {
- var res =
- coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 10000});
- assert.commandWorked(res,
- 'reading from ' + coll.getFullName() + ' on ' + coll.getMongo().host);
- return new DBCommandCursor(coll.getDB(), res).toArray()[0].state;
+});
+
+// Stop applier on pureSecondary to ensure that writes to newPrimary won't become committed yet.
+assert.commandWorked(
+ pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+assert.writeOK(newPrimaryColl.save({_id: 1, state: 'new'}));
+assert.eq(doDirtyRead(newPrimaryColl), 'new');
+// Note that we still can't do a committed read from the new primary and reliably get anything,
+// since we never proved that it learned about the commit level from the old primary before
+// the new primary got elected. The new primary cannot advance the commit level until it
+// commits a write in its own term. This includes learning that a majority of nodes have
+// received such a write.
+assert.eq(doCommittedRead(oldPrimaryColl), 'old');
+
+// Reconnect oldPrimary to newPrimary, inducing rollback of the 'INVALID' write. This causes
+// oldPrimary to clear its read majority point. oldPrimary still won't be connected to enough
+// hosts to allow it to be elected, so newPrimary should stay primary for the rest of this test.
+oldPrimary.reconnect(newPrimary);
+assert.soon(function() {
+ try {
+ return oldPrimary.adminCommand('isMaster').secondary &&
+ doDirtyRead(oldPrimaryColl) == 'new';
+ } catch (e) {
+ return false; // ignore disconnect errors.
}
-
- function doDirtyRead(coll) {
- var res = coll.runCommand('find', {"readConcern": {"level": "local"}});
- assert.commandWorked(res,
- 'reading from ' + coll.getFullName() + ' on ' + coll.getMongo().host);
- return new DBCommandCursor(coll.getDB(), res).toArray()[0].state;
- }
-
- // Set up a set and grab things for later.
- var name = "read_committed_after_rollback";
- var replTest = new ReplSetTest(
- {name: name, nodes: 5, useBridge: true, nodeOptions: {enableMajorityReadConcern: ''}});
-
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
-
- var nodes = replTest.nodeList();
- var config = {
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], priority: 0},
- // Note: using two arbiters to ensure that a host that can't talk to any other
- // data-bearing node can still be elected. This also means that a write isn't considered
- // committed until it is on all 3 data-bearing nodes, not just 2.
- {"_id": 3, "host": nodes[3], arbiterOnly: true},
- {"_id": 4, "host": nodes[4], arbiterOnly: true},
- ]
- };
-
- replTest.initiate(config);
-
- // Get connections.
- var oldPrimary = replTest.getPrimary();
- var newPrimary = replTest._slaves[0];
- var pureSecondary = replTest._slaves[1];
- var arbiters = [replTest.nodes[3], replTest.nodes[4]];
-
- // This is the collection that all of the tests will use.
- var collName = name + '.collection';
- var oldPrimaryColl = oldPrimary.getCollection(collName);
- var newPrimaryColl = newPrimary.getCollection(collName);
-
- // Set up initial state.
- assert.writeOK(oldPrimaryColl.insert({_id: 1, state: 'old'},
- {writeConcern: {w: 'majority', wtimeout: 30000}}));
- assert.eq(doDirtyRead(oldPrimaryColl), 'old');
- assert.eq(doCommittedRead(oldPrimaryColl), 'old');
- assert.eq(doDirtyRead(newPrimaryColl), 'old');
- // Note that we can't necessarily do a committed read from newPrimaryColl and get 'old', since
- // delivery of the commit level to secondaries isn't synchronized with anything
- // (we would have to hammer to reliably prove that it eventually would work).
-
- // Partition the world such that oldPrimary is still primary but can't replicate to anyone.
- // newPrimary is disconnected from the arbiters first to ensure that it can't be elected.
- newPrimary.disconnect(arbiters);
- oldPrimary.disconnect([newPrimary, pureSecondary]);
- assert.eq(doDirtyRead(newPrimaryColl), 'old');
-
- // This write will only make it to oldPrimary and will never become committed.
- assert.writeOK(oldPrimaryColl.save({_id: 1, state: 'INVALID'}));
- assert.eq(doDirtyRead(oldPrimaryColl), 'INVALID');
- assert.eq(doCommittedRead(oldPrimaryColl), 'old');
-
- // Change the partitioning so that oldPrimary is isolated, and newPrimary can be elected.
- oldPrimary.setSlaveOk();
- oldPrimary.disconnect(arbiters);
- newPrimary.reconnect(arbiters);
- assert.soon(() => newPrimary.adminCommand('isMaster').ismaster, '', 60 * 1000);
- assert.soon(function() {
- try {
- return !oldPrimary.adminCommand('isMaster').ismaster;
- } catch (e) {
- return false; // ignore disconnect errors.
- }
- });
-
- // Stop applier on pureSecondary to ensure that writes to newPrimary won't become committed yet.
- assert.commandWorked(
- pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- assert.writeOK(newPrimaryColl.save({_id: 1, state: 'new'}));
- assert.eq(doDirtyRead(newPrimaryColl), 'new');
- // Note that we still can't do a committed read from the new primary and reliably get anything,
- // since we never proved that it learned about the commit level from the old primary before
- // the new primary got elected. The new primary cannot advance the commit level until it
- // commits a write in its own term. This includes learning that a majority of nodes have
- // received such a write.
- assert.eq(doCommittedRead(oldPrimaryColl), 'old');
-
- // Reconnect oldPrimary to newPrimary, inducing rollback of the 'INVALID' write. This causes
- // oldPrimary to clear its read majority point. oldPrimary still won't be connected to enough
- // hosts to allow it to be elected, so newPrimary should stay primary for the rest of this test.
- oldPrimary.reconnect(newPrimary);
- assert.soon(function() {
- try {
- return oldPrimary.adminCommand('isMaster').secondary &&
- doDirtyRead(oldPrimaryColl) == 'new';
- } catch (e) {
- return false; // ignore disconnect errors.
- }
- }, '', 60 * 1000);
- assert.eq(doDirtyRead(oldPrimaryColl), 'new');
-
- // Resume oplog application on pureSecondary to allow the 'new' write to be committed. It should
- // now be visible as a committed read to both oldPrimary and newPrimary.
- assert.commandWorked(
- pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
- // Do a write to the new primary so that the old primary can establish a sync source to learn
- // about the new commit.
- assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert(
- {a: 1}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}}));
- assert.eq(doCommittedRead(newPrimaryColl), 'new');
- // Do another write to the new primary so that the old primary can be sure to receive the
- // new committed optime.
- assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert(
- {a: 2}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}}));
- assert.eq(doCommittedRead(oldPrimaryColl), 'new');
-
- // Verify data consistency between nodes.
- replTest.checkReplicatedDataHashes();
- replTest.checkOplogs();
- replTest.stopSet();
+}, '', 60 * 1000);
+assert.eq(doDirtyRead(oldPrimaryColl), 'new');
+
+// Resume oplog application on pureSecondary to allow the 'new' write to be committed. It should
+// now be visible as a committed read to both oldPrimary and newPrimary.
+assert.commandWorked(
+ pureSecondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
+// Do a write to the new primary so that the old primary can establish a sync source to learn
+// about the new commit.
+assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert(
+ {a: 1}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}}));
+assert.eq(doCommittedRead(newPrimaryColl), 'new');
+// Do another write to the new primary so that the old primary can be sure to receive the
+// new committed optime.
+assert.writeOK(newPrimary.getDB(name).unrelatedCollection.insert(
+ {a: 2}, {writeConcern: {w: 'majority', wtimeout: replTest.kDefaultTimeoutMS}}));
+assert.eq(doCommittedRead(oldPrimaryColl), 'new');
+
+// Verify data consistency between nodes.
+replTest.checkReplicatedDataHashes();
+replTest.checkOplogs();
+replTest.stopSet();
}());
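The pivotal trick above is freezing the majority commit point by stopping oplog application on the one data-bearing node whose acknowledgement is still needed. A minimal sketch of that pattern, assuming the primary and pureSecondary handles from the test; the collection name is invented.

// Sketch: with the applier stopped on the last needed data-bearing node, new
// writes replicate to a minority only and stay invisible to majority reads.
var c = primary.getCollection('test.sketch');
assert.commandWorked(pureSecondary.adminCommand(
    {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
assert.writeOK(c.insert({state: 'pending'}));  // no majority write concern
// Here {level: 'local'} reads see the insert; {level: 'majority'} reads do not.
// (With two arbiters, "majority committed" requires every data-bearing node.)
assert.commandWorked(pureSecondary.adminCommand(
    {configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));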
diff --git a/jstests/replsets/read_committed_lookup.js b/jstests/replsets/read_committed_lookup.js
index fbd4c6f5d19..18f77f9237a 100644
--- a/jstests/replsets/read_committed_lookup.js
+++ b/jstests/replsets/read_committed_lookup.js
@@ -7,40 +7,40 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajori
load("jstests/libs/read_committed_lib.js"); // For testReadCommittedLookup
(function() {
- "use strict";
-
- // Confirm majority readConcern works on a replica set.
- const replSetName = "lookup_read_majority";
- let rst = new ReplSetTest({
- nodes: 3,
- name: replSetName,
- nodeOptions: {
- enableMajorityReadConcern: "",
- shardsvr: "",
- }
- });
-
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- rst.stopSet();
- return;
+"use strict";
+
+// Confirm majority readConcern works on a replica set.
+const replSetName = "lookup_read_majority";
+let rst = new ReplSetTest({
+ nodes: 3,
+ name: replSetName,
+ nodeOptions: {
+ enableMajorityReadConcern: "",
+ shardsvr: "",
}
+});
- const nodes = rst.nodeList();
- const config = {
- _id: replSetName,
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1], priority: 0},
- {_id: 2, host: nodes[2], arbiterOnly: true},
- ]
- };
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ rst.stopSet();
+ return;
+}
- rst.initiate(config);
+const nodes = rst.nodeList();
+const config = {
+ _id: replSetName,
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1], priority: 0},
+ {_id: 2, host: nodes[2], arbiterOnly: true},
+ ]
+};
- let shardSecondary = rst._slaves[0];
+rst.initiate(config);
- testReadCommittedLookup(rst.getPrimary().getDB("test"), shardSecondary, rst);
+let shardSecondary = rst._slaves[0];
- rst.stopSet();
+testReadCommittedLookup(rst.getPrimary().getDB("test"), shardSecondary, rst);
+
+rst.stopSet();
})();
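testReadCommittedLookup lives in jstests/libs/read_committed_lib.js; in essence it runs $lookup aggregations at readConcern majority. A minimal illustrative sketch follows; the collection and field names are invented and this is not the library's actual set of assertions.

// Sketch: an aggregation with $lookup issued at readConcern majority.
var testDB = rst.getPrimary().getDB('test');
assert.commandWorked(testDB.runCommand({
    aggregate: 'orders',
    pipeline: [{
        $lookup:
            {from: 'items', localField: 'itemId', foreignField: '_id', as: 'item'}
    }],
    cursor: {},
    readConcern: {level: 'majority'},
}));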
diff --git a/jstests/replsets/read_committed_no_snapshots.js b/jstests/replsets/read_committed_no_snapshots.js
index 59524c24bd2..9cb4835727c 100644
--- a/jstests/replsets/read_committed_no_snapshots.js
+++ b/jstests/replsets/read_committed_no_snapshots.js
@@ -8,77 +8,74 @@
load("jstests/replsets/rslib.js"); // For reconfig and startSetIfSupportsReadMajority.
(function() {
- "use strict";
+"use strict";
- // Set up a set and grab things for later.
- var name = "read_committed_no_snapshots";
- var replTest = new ReplSetTest({
- name: name,
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- {
- setParameter: {"failpoint.disableSnapshotting": "{'mode':'alwaysOn'}"},
- rsConfig: {priority: 0}
- }
- ],
- nodeOptions: {enableMajorityReadConcern: ''},
- settings: {protocolVersion: 1}
- });
+// Set up a set and grab things for later.
+var name = "read_committed_no_snapshots";
+var replTest = new ReplSetTest({
+ name: name,
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ {
+ setParameter: {"failpoint.disableSnapshotting": "{'mode':'alwaysOn'}"},
+ rsConfig: {priority: 0}
+ }
+ ],
+ nodeOptions: {enableMajorityReadConcern: ''},
+ settings: {protocolVersion: 1}
+});
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ replTest.stopSet();
+ return;
+}
- // Cannot wait for a stable recovery timestamp due to the no-snapshot secondary.
- replTest.initiateWithAnyNodeAsPrimary(
- null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
+// Cannot wait for a stable recovery timestamp due to the no-snapshot secondary.
+replTest.initiateWithAnyNodeAsPrimary(
+ null, "replSetInitiate", {doNotWaitForStableRecoveryTimestamp: true});
- // Get connections and collection.
- var primary = replTest.getPrimary();
- var healthySecondary = replTest._slaves[0];
- healthySecondary.setSlaveOk();
- var noSnapshotSecondary = replTest._slaves[1];
- noSnapshotSecondary.setSlaveOk();
+// Get connections and collection.
+var primary = replTest.getPrimary();
+var healthySecondary = replTest._slaves[0];
+healthySecondary.setSlaveOk();
+var noSnapshotSecondary = replTest._slaves[1];
+noSnapshotSecondary.setSlaveOk();
- // Do a write, wait for it to replicate, and ensure it is visible.
- var res = primary.getDB(name).runCommandWithMetadata( //
- {
- insert: "foo",
- documents: [{_id: 1, state: 0}],
- writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}
- },
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
+// Do a write, wait for it to replicate, and ensure it is visible.
+var res = primary.getDB(name).runCommandWithMetadata( //
+ {
+ insert: "foo",
+ documents: [{_id: 1, state: 0}],
+ writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}
+ },
+ {"$replData": 1});
+assert.commandWorked(res.commandReply);
- // We need to propagate the lastOpVisible from the primary as afterOpTime in the secondaries to
- // ensure we wait for the write to be in the majority committed view.
- var lastOp = res.commandReply["$replData"].lastOpVisible;
+// We need to propagate the lastOpVisible from the primary as afterOpTime in the secondaries to
+// ensure we wait for the write to be in the majority committed view.
+var lastOp = res.commandReply["$replData"].lastOpVisible;
- // Timeout is based on heartbeat timeout.
- assert.commandWorked(healthySecondary.getDB(name).foo.runCommand(
- 'find',
- {"readConcern": {"level": "majority", "afterOpTime": lastOp}, "maxTimeMS": 10 * 1000}));
+// Timeout is based on heartbeat timeout.
+assert.commandWorked(healthySecondary.getDB(name).foo.runCommand(
+ 'find', {"readConcern": {"level": "majority", "afterOpTime": lastOp}, "maxTimeMS": 10 * 1000}));
- // Ensure maxTimeMS times out while waiting for this snapshot
- assert.commandFailedWithCode(
- noSnapshotSecondary.getDB(name).foo.runCommand(
- 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}),
- ErrorCodes.MaxTimeMSExpired);
+// Ensure maxTimeMS times out while waiting for this snapshot
+assert.commandFailedWithCode(noSnapshotSecondary.getDB(name).foo.runCommand(
+ 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}),
+ ErrorCodes.MaxTimeMSExpired);
- // Reconfig to make the no-snapshot secondary the primary
- var config = primary.getDB("local").system.replset.findOne();
- config.members[0].priority = 0;
- config.members[2].priority = 1;
- config.version++;
- primary = reconfig(replTest, config, true);
+// Reconfig to make the no-snapshot secondary the primary
+var config = primary.getDB("local").system.replset.findOne();
+config.members[0].priority = 0;
+config.members[2].priority = 1;
+config.version++;
+primary = reconfig(replTest, config, true);
- // Ensure maxTimeMS times out while waiting for this snapshot
- assert.commandFailedWithCode(
- primary.getSiblingDB(name).foo.runCommand(
- 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}),
- ErrorCodes.MaxTimeMSExpired);
- replTest.stopSet();
+// Ensure maxTimeMS times out while waiting for this snapshot
+assert.commandFailedWithCode(primary.getSiblingDB(name).foo.runCommand(
+ 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+replTest.stopSet();
})();
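The $replData handoff above is worth isolating: the primary reports the last majority-visible optime alongside its reply, and the reader passes it back as afterOpTime so the secondary waits for a view that includes the write. A minimal sketch, assuming primary and secondary connections to the same set; the database and collection names are invented.

// Sketch: thread lastOpVisible from a write's reply into a causally
// consistent majority read on a secondary.
var reply = primary.getDB('test').runCommandWithMetadata(
    {insert: 'foo', documents: [{x: 1}], writeConcern: {w: 'majority'}},
    {'$replData': 1});
assert.commandWorked(reply.commandReply);
var lastOp = reply.commandReply['$replData'].lastOpVisible;

secondary.setSlaveOk();
assert.commandWorked(secondary.getDB('test').foo.runCommand('find', {
    readConcern: {level: 'majority', afterOpTime: lastOp},
    maxTimeMS: 10 * 1000,
}));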
diff --git a/jstests/replsets/read_committed_on_secondary.js b/jstests/replsets/read_committed_on_secondary.js
index 824a0f2e0bd..ae999799879 100644
--- a/jstests/replsets/read_committed_on_secondary.js
+++ b/jstests/replsets/read_committed_on_secondary.js
@@ -7,131 +7,131 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
- "use strict";
-
- function printStatus() {
- var primaryStatus;
- replTest.nodes.forEach((n) => {
- var status = n.getDB("admin").runCommand("replSetGetStatus");
- var self = status.members.filter((m) => m.self)[0];
- var msg = self.name + "\n";
- msg += tojson(status.optimes) + "\n";
- if (self.state == 1) { // Primary status.
- // List other members status from the primaries perspective
- msg += tojson(status.members.filter((m) => !m.self)) + "\n";
- msg += tojson(status.slaveInfo) + "\n";
- }
- jsTest.log(msg);
- });
- }
-
- function log(arg) {
- jsTest.log(tojson(arg));
- }
- // Set up a set and grab things for later.
- var name = "read_committed_on_secondary";
- var replTest =
- new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
-
- if (!startSetIfSupportsReadMajority(replTest)) {
- log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
-
- var nodes = replTest.nodeList();
- var config = {
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], priority: 0},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- };
-
- replTest.initiate(config);
-
- // Get connections and collection.
- var primary = replTest.getPrimary();
- var secondary = replTest._slaves[0];
- var secondaryId = replTest.getNodeId(secondary);
-
- var dbPrimary = primary.getDB(name);
- var collPrimary = dbPrimary[name];
-
- var dbSecondary = secondary.getDB(name);
- var collSecondary = dbSecondary[name];
-
- function saveDoc(state) {
- log("saving doc.");
- var res = dbPrimary.runCommandWithMetadata( //
- {
- update: name,
- writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS},
- updates: [{q: {_id: 1}, u: {_id: 1, state: state}, upsert: true}],
- },
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
- assert.eq(res.commandReply.writeErrors, undefined);
- log("done saving doc: optime " + tojson(res.commandReply.$replData.lastOpVisible));
- return res.commandReply.$replData.lastOpVisible;
- }
-
- function doDirtyRead(lastOp) {
- log("doing dirty read for lastOp:" + tojson(lastOp));
- var res = collSecondary.runCommand('find', {
- "readConcern": {"level": "local", "afterOpTime": lastOp},
- "maxTimeMS": replTest.kDefaultTimeoutMS
- });
- assert.commandWorked(res);
- log("done doing dirty read.");
- return new DBCommandCursor(dbSecondary, res).toArray()[0].state;
- }
-
- function doCommittedRead(lastOp) {
- log("doing committed read for optime: " + tojson(lastOp));
- var res = collSecondary.runCommand('find', {
- "readConcern": {"level": "majority", "afterOpTime": lastOp},
- "maxTimeMS": replTest.kDefaultTimeoutMS
- });
- assert.commandWorked(res);
- log("done doing committed read.");
- return new DBCommandCursor(dbSecondary, res).toArray()[0].state;
- }
-
- // Do a write, wait for it to replicate, and ensure it is visible.
- var op0 = saveDoc(0);
- assert.eq(doDirtyRead(op0), 0);
-
- printStatus();
- assert.eq(doCommittedRead(op0), 0);
-
- // Disable snapshotting on the secondary.
- secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'});
-
- // Do a write and ensure it is only visible to dirty reads
- var op1 = saveDoc(1);
- assert.eq(doDirtyRead(op1), 1);
- assert.eq(doCommittedRead(op0), 0);
-
- // Try the committed read again after sleeping to ensure it doesn't only work for queries
- // immediately after the write.
- log("sleeping");
- sleep(1000);
- assert.eq(doCommittedRead(op0), 0);
-
- // Reenable snapshotting on the secondary and ensure that committed reads are able to see the
- // new
- // state.
- log("turning off failpoint");
- secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'});
- // Do another write in order to update the committedSnapshot value.
- var op2 = saveDoc(2);
- assert.eq(doDirtyRead(op2), 2);
- log(replTest.status());
- replTest.awaitReplication();
- log(replTest.status());
- assert.eq(doCommittedRead(op2), 2);
- log("test success!");
+"use strict";
+
+function printStatus() {
+ var primaryStatus;
+ replTest.nodes.forEach((n) => {
+ var status = n.getDB("admin").runCommand("replSetGetStatus");
+ var self = status.members.filter((m) => m.self)[0];
+ var msg = self.name + "\n";
+ msg += tojson(status.optimes) + "\n";
+ if (self.state == 1) { // Primary status.
+            // List other members' status from the primary's perspective.
+ msg += tojson(status.members.filter((m) => !m.self)) + "\n";
+ msg += tojson(status.slaveInfo) + "\n";
+ }
+ jsTest.log(msg);
+ });
+}
+
+function log(arg) {
+ jsTest.log(tojson(arg));
+}
+// Set up a set and grab things for later.
+var name = "read_committed_on_secondary";
+var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
+
+if (!startSetIfSupportsReadMajority(replTest)) {
+ log("skipping test since storage engine doesn't support committed reads");
replTest.stopSet();
+ return;
+}
+
+var nodes = replTest.nodeList();
+var config = {
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+};
+
+replTest.initiate(config);
+
+// Get connections and collection.
+var primary = replTest.getPrimary();
+var secondary = replTest._slaves[0];
+var secondaryId = replTest.getNodeId(secondary);
+
+var dbPrimary = primary.getDB(name);
+var collPrimary = dbPrimary[name];
+
+var dbSecondary = secondary.getDB(name);
+var collSecondary = dbSecondary[name];
+
+function saveDoc(state) {
+ log("saving doc.");
+ var res = dbPrimary.runCommandWithMetadata( //
+ {
+ update: name,
+ writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS},
+ updates: [{q: {_id: 1}, u: {_id: 1, state: state}, upsert: true}],
+ },
+ {"$replData": 1});
+ assert.commandWorked(res.commandReply);
+ assert.eq(res.commandReply.writeErrors, undefined);
+ log("done saving doc: optime " + tojson(res.commandReply.$replData.lastOpVisible));
+ return res.commandReply.$replData.lastOpVisible;
+}
+
+function doDirtyRead(lastOp) {
+ log("doing dirty read for lastOp:" + tojson(lastOp));
+ var res = collSecondary.runCommand('find', {
+ "readConcern": {"level": "local", "afterOpTime": lastOp},
+ "maxTimeMS": replTest.kDefaultTimeoutMS
+ });
+ assert.commandWorked(res);
+ log("done doing dirty read.");
+ return new DBCommandCursor(dbSecondary, res).toArray()[0].state;
+}
+
+function doCommittedRead(lastOp) {
+ log("doing committed read for optime: " + tojson(lastOp));
+ var res = collSecondary.runCommand('find', {
+ "readConcern": {"level": "majority", "afterOpTime": lastOp},
+ "maxTimeMS": replTest.kDefaultTimeoutMS
+ });
+ assert.commandWorked(res);
+ log("done doing committed read.");
+ return new DBCommandCursor(dbSecondary, res).toArray()[0].state;
+}
+
+// Do a write, wait for it to replicate, and ensure it is visible.
+var op0 = saveDoc(0);
+assert.eq(doDirtyRead(op0), 0);
+
+printStatus();
+assert.eq(doCommittedRead(op0), 0);
+
+// Disable snapshotting on the secondary.
+secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'});
+
+// Do a write and ensure it is only visible to dirty reads
+var op1 = saveDoc(1);
+assert.eq(doDirtyRead(op1), 1);
+assert.eq(doCommittedRead(op0), 0);
+
+// Try the committed read again after sleeping to ensure it doesn't only work for queries
+// immediately after the write.
+log("sleeping");
+sleep(1000);
+assert.eq(doCommittedRead(op0), 0);
+
+// Reenable snapshotting on the secondary and ensure that committed reads are able to see the
+// new state.
+log("turning off failpoint");
+secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'});
+// Do another write in order to update the committedSnapshot value.
+var op2 = saveDoc(2);
+assert.eq(doDirtyRead(op2), 2);
+log(replTest.status());
+replTest.awaitReplication();
+log(replTest.status());
+assert.eq(doCommittedRead(op2), 2);
+log("test success!");
+replTest.stopSet();
}());
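The disableSnapshotting failpoint above pins the committed view: replication continues, but the node takes no new committed snapshots until the failpoint lifts. A minimal sketch, assuming primary and secondary handles as in the test; the collection name is invented.

// Sketch: while snapshotting is disabled, local reads advance but majority
// reads keep answering from the last snapshot taken before the failpoint.
var collP = primary.getDB('test').sketch;
assert.writeOK(collP.insert({_id: 1, state: 0}, {writeConcern: {w: 2}}));
assert.commandWorked(secondary.adminCommand(
    {configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
assert.writeOK(collP.update({_id: 1}, {$set: {state: 1}}, {writeConcern: {w: 2}}));
// Majority reads on the secondary still return {state: 0} at this point.
assert.commandWorked(secondary.adminCommand(
    {configureFailPoint: 'disableSnapshotting', mode: 'off'}));
// A subsequent replicated write refreshes the committed snapshot.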
diff --git a/jstests/replsets/read_committed_stale_history.js b/jstests/replsets/read_committed_stale_history.js
index 3ee22559749..f40841575f4 100644
--- a/jstests/replsets/read_committed_stale_history.js
+++ b/jstests/replsets/read_committed_stale_history.js
@@ -3,144 +3,144 @@
* when hearing about a commit point with a higher optime from a new primary.
*/
(function() {
- 'use strict';
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/write_concern_util.js");
- load("jstests/replsets/rslib.js");
-
- var name = "readCommittedStaleHistory";
- var dbName = "wMajorityCheck";
- var collName = "stepdown";
-
- var rst = new ReplSetTest({
- name: name,
- nodes: [
- {},
- {},
- {rsConfig: {priority: 0}},
- ],
- nodeOptions: {enableMajorityReadConcern: ""},
- useBridge: true
- });
+'use strict';
+
+load("jstests/libs/check_log.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/replsets/rslib.js");
+
+var name = "readCommittedStaleHistory";
+var dbName = "wMajorityCheck";
+var collName = "stepdown";
+
+var rst = new ReplSetTest({
+ name: name,
+ nodes: [
+ {},
+ {},
+ {rsConfig: {priority: 0}},
+ ],
+ nodeOptions: {enableMajorityReadConcern: ""},
+ useBridge: true
+});
+
+if (!startSetIfSupportsReadMajority(rst)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ rst.stopSet();
+ return;
+}
- if (!startSetIfSupportsReadMajority(rst)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- rst.stopSet();
- return;
- }
-
- var nodes = rst.nodes;
- rst.initiate();
-
- /**
- * Waits for the given node to be in state primary *and* have finished drain mode and thus
- * be available for writes.
- */
- function waitForPrimary(node) {
- assert.soon(function() {
- return node.adminCommand('ismaster').ismaster;
- });
- }
-
- // Asserts that the given document is not visible in the committed snapshot on the given node.
- function checkDocNotCommitted(node, doc) {
- var docs =
- node.getDB(dbName).getCollection(collName).find(doc).readConcern('majority').toArray();
- assert.eq(0, docs.length, tojson(docs));
- }
-
- // SERVER-20844 ReplSetTest starts up a single node replica set then reconfigures to the correct
- // size for faster startup, so nodes[0] is always the first primary.
- jsTestLog("Make sure node 0 is primary.");
- var primary = rst.getPrimary();
- var secondaries = rst.getSecondaries();
- assert.eq(nodes[0], primary);
- // Wait for all data bearing nodes to get up to date.
- assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
- {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
-
- // Stop the secondaries from replicating.
- stopServerReplication(secondaries);
- // Stop the primary from being able to complete stepping down.
- assert.commandWorked(
- nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'}));
-
- jsTestLog("Do a write that won't ever reach a majority of nodes");
- assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert({a: 2}));
-
- // Ensure that the write that was just done is not visible in the committed snapshot.
- checkDocNotCommitted(nodes[0], {a: 2});
-
- // Prevent the primary from rolling back later on.
- assert.commandWorked(
- nodes[0].adminCommand({configureFailPoint: 'rollbackHangBeforeStart', mode: 'alwaysOn'}));
-
- jsTest.log("Disconnect primary from all secondaries");
- nodes[0].disconnect(nodes[1]);
- nodes[0].disconnect(nodes[2]);
-
- // Ensure the soon-to-be primary cannot see the write from the old primary.
- assert.eq(null, nodes[1].getDB(dbName).getCollection(collName).findOne({a: 2}));
-
- jsTest.log("Wait for a new primary to be elected");
- // Allow the secondaries to replicate again.
- restartServerReplication(secondaries);
-
- waitForPrimary(nodes[1]);
-
- jsTest.log("Do a write to the new primary");
- assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
- {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
-
- // Ensure the new primary still cannot see the write from the old primary.
- assert.eq(null, nodes[1].getDB(dbName).getCollection(collName).findOne({a: 2}));
-
- jsTest.log("Reconnect the old primary to the rest of the nodes");
- nodes[1].reconnect(nodes[0]);
- nodes[2].reconnect(nodes[0]);
-
- // Sleep 10 seconds to allow some heartbeats to be processed, so we can verify that the
- // heartbeats don't cause the stale primary to incorrectly advance the commit point.
- sleep(10000);
-
- checkDocNotCommitted(nodes[0], {a: 2});
-
- jsTest.log("Allow the old primary to finish stepping down and become secondary");
- var res = null;
- try {
- res = nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'off'});
- } catch (e) {
- // Expected - once we disable the fail point the stepdown will proceed and it's racy whether
- // the stepdown closes all connections before or after the configureFailPoint command
- // returns
- }
- if (res) {
- assert.commandWorked(res);
- }
- rst.waitForState(nodes[0], ReplSetTest.State.SECONDARY);
- reconnect(nodes[0]);
-
- // At this point the former primary will attempt to go into rollback, but the
- // 'rollbackHangBeforeStart' will prevent it from doing so.
- checkDocNotCommitted(nodes[0], {a: 2});
- checkLog.contains(nodes[0], 'rollback - rollbackHangBeforeStart fail point enabled');
- checkDocNotCommitted(nodes[0], {a: 2});
-
- jsTest.log("Allow the original primary to roll back its write and catch up to the new primary");
- assert.adminCommandWorkedAllowingNetworkError(
- nodes[0], {configureFailPoint: 'rollbackHangBeforeStart', mode: 'off'});
-
- assert.soonNoExcept(function() {
- return null == nodes[0].getDB(dbName).getCollection(collName).findOne({a: 2});
- }, "Original primary never rolled back its write");
-
- rst.awaitReplication();
-
- // Ensure that the old primary got the write that the new primary did and sees it as committed.
- assert.neq(
- null,
- nodes[0].getDB(dbName).getCollection(collName).find({a: 3}).readConcern('majority').next());
+var nodes = rst.nodes;
+rst.initiate();
- rst.stopSet();
+/**
+ * Waits for the given node to be in state primary *and* have finished drain mode and thus
+ * be available for writes.
+ */
+function waitForPrimary(node) {
+ assert.soon(function() {
+ return node.adminCommand('ismaster').ismaster;
+ });
+}
+
+// Asserts that the given document is not visible in the committed snapshot on the given node.
+function checkDocNotCommitted(node, doc) {
+ var docs =
+ node.getDB(dbName).getCollection(collName).find(doc).readConcern('majority').toArray();
+ assert.eq(0, docs.length, tojson(docs));
+}
+
+// SERVER-20844: ReplSetTest starts up a single-node replica set, then reconfigures it to the
+// correct size for faster startup, so nodes[0] is always the first primary.
+jsTestLog("Make sure node 0 is primary.");
+var primary = rst.getPrimary();
+var secondaries = rst.getSecondaries();
+assert.eq(nodes[0], primary);
+// Wait for all data-bearing nodes to get up to date.
+assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
+ {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
+
+// Stop the secondaries from replicating.
+stopServerReplication(secondaries);
+// Stop the primary from being able to complete stepping down.
+assert.commandWorked(
+ nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'}));
+
+jsTestLog("Do a write that won't ever reach a majority of nodes");
+assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert({a: 2}));
+
+// Ensure that the write that was just done is not visible in the committed snapshot.
+checkDocNotCommitted(nodes[0], {a: 2});
+
+// Prevent the primary from rolling back later on.
+assert.commandWorked(
+ nodes[0].adminCommand({configureFailPoint: 'rollbackHangBeforeStart', mode: 'alwaysOn'}));
+
+jsTest.log("Disconnect primary from all secondaries");
+nodes[0].disconnect(nodes[1]);
+nodes[0].disconnect(nodes[2]);
+
+// Ensure the soon-to-be primary cannot see the write from the old primary.
+assert.eq(null, nodes[1].getDB(dbName).getCollection(collName).findOne({a: 2}));
+
+jsTest.log("Wait for a new primary to be elected");
+// Allow the secondaries to replicate again.
+restartServerReplication(secondaries);
+
+waitForPrimary(nodes[1]);
+
+jsTest.log("Do a write to the new primary");
+assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
+ {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
+
+// Ensure the new primary still cannot see the write from the old primary.
+assert.eq(null, nodes[1].getDB(dbName).getCollection(collName).findOne({a: 2}));
+
+jsTest.log("Reconnect the old primary to the rest of the nodes");
+nodes[1].reconnect(nodes[0]);
+nodes[2].reconnect(nodes[0]);
+
+// Sleep 10 seconds to allow some heartbeats to be processed, so we can verify that the
+// heartbeats don't cause the stale primary to incorrectly advance the commit point.
+sleep(10000);
+
+checkDocNotCommitted(nodes[0], {a: 2});
+
+jsTest.log("Allow the old primary to finish stepping down and become secondary");
+var res = null;
+try {
+ res = nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'off'});
+} catch (e) {
+    // Expected - once we disable the fail point, the stepdown proceeds, and it is racy
+    // whether the stepdown closes all connections before or after the configureFailPoint
+    // command returns.
+}
+if (res) {
+ assert.commandWorked(res);
+}
+rst.waitForState(nodes[0], ReplSetTest.State.SECONDARY);
+reconnect(nodes[0]);
+
+// At this point the former primary will attempt to go into rollback, but the
+// 'rollbackHangBeforeStart' fail point will prevent it from doing so.
+checkDocNotCommitted(nodes[0], {a: 2});
+checkLog.contains(nodes[0], 'rollback - rollbackHangBeforeStart fail point enabled');
+checkDocNotCommitted(nodes[0], {a: 2});
+
+jsTest.log("Allow the original primary to roll back its write and catch up to the new primary");
+assert.adminCommandWorkedAllowingNetworkError(
+ nodes[0], {configureFailPoint: 'rollbackHangBeforeStart', mode: 'off'});
+
+assert.soonNoExcept(function() {
+ return null == nodes[0].getDB(dbName).getCollection(collName).findOne({a: 2});
+}, "Original primary never rolled back its write");
+
+rst.awaitReplication();
+
+// Ensure that the old primary got the write that the new primary did and sees it as committed.
+assert.neq(
+ null,
+ nodes[0].getDB(dbName).getCollection(collName).find({a: 3}).readConcern('majority').next());
+
+rst.stopSet();
}());
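The test above leans on the standard fail point lifecycle used throughout these suites. A minimal sketch of that pattern, with a placeholder fail point name ('someFailPoint' is hypothetical; the real names used above are 'blockHeartbeatStepdown' and 'rollbackHangBeforeStart'):

    // Enable a fail point, confirm the server hit it, then disable it.
    var conn = rst.getPrimary();  // assumes an already-started ReplSetTest 'rst'
    assert.commandWorked(
        conn.adminCommand({configureFailPoint: 'someFailPoint', mode: 'alwaysOn'}));
    // Tests typically confirm the fail point was reached by scanning the server log.
    checkLog.contains(conn, 'someFailPoint fail point enabled');
    assert.commandWorked(
        conn.adminCommand({configureFailPoint: 'someFailPoint', mode: 'off'}));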
diff --git a/jstests/replsets/read_committed_with_catalog_changes.js b/jstests/replsets/read_committed_with_catalog_changes.js
index 0213aebe36a..2e548a20095 100644
--- a/jstests/replsets/read_committed_with_catalog_changes.js
+++ b/jstests/replsets/read_committed_with_catalog_changes.js
@@ -26,319 +26,316 @@ load("jstests/libs/parallelTester.js"); // For ScopedThread.
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
- "use strict";
+"use strict";
- // Each test case includes a 'prepare' method that sets up the initial state starting with a
- // database that has been dropped, a 'performOp' method that does some operation, and two
- // arrays, 'blockedCollections' and 'unblockedCollections', that list the collections that
-    // should be blocked or unblocked from the time the operation is performed until it is
- // committed. If the operation is local only and isn't replicated, the test case should include
- // a 'localOnly' field set to true. Test cases are not allowed to touch any databases other than
- // the one passed in.
- const testCases = {
- createCollectionInExistingDB: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- },
- performOp: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
+// Each test case includes a 'prepare' method that sets up the initial state starting with a
+// database that has been dropped, a 'performOp' method that does some operation, and two
+// arrays, 'blockedCollections' and 'unblockedCollections', that list the collections that
+// should be blocked or unblocked from the time the operation is performed until it is
+// committed. If the operation is local only and isn't replicated, the test case should include
+// a 'localOnly' field set to true. Test cases are not allowed to touch any databases other than
+// the one passed in.
+const testCases = {
+ createCollectionInExistingDB: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
},
- createCollectionInNewDB: {
- prepare: function(db) {},
- performOp: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['otherDoesNotExist'], // Only existent collections are blocked.
+ performOp: function(db) {
+ assert.writeOK(db.coll.insert({_id: 1}));
},
- dropCollection: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- performOp: function(db) {
- assert(db.coll.drop());
- },
- blockedCollections: [],
- unblockedCollections: ['coll', 'other'],
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ },
+ createCollectionInNewDB: {
+ prepare: function(db) {},
+ performOp: function(db) {
+ assert.writeOK(db.coll.insert({_id: 1}));
},
- dropDB: {
- prepare: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
- // Drop collection explicitly during the preparation phase while we are still able
- // to write to a majority. Otherwise, dropDatabase() will drop the collection
- // and wait for the collection drop to be replicated to a majority of the nodes.
- assert(db.coll.drop());
- },
- performOp: function(db) {
- assert.commandWorked(db.dropDatabase({w: 1}));
- },
- blockedCollections: [],
- unblockedCollections: ['coll'],
+ blockedCollections: ['coll'],
+ unblockedCollections: ['otherDoesNotExist'], // Only existent collections are blocked.
+ },
+ dropCollection: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
},
- dropAndRecreateCollection: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- performOp: function(db) {
- assert(db.coll.drop());
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
+ performOp: function(db) {
+ assert(db.coll.drop());
},
- dropAndRecreateDB: {
- prepare: function(db) {
- assert.writeOK(db.coll.insert({_id: 1}));
- // Drop collection explicitly during the preparation phase while we are still able
- // to write to a majority. Otherwise, dropDatabase() will drop the collection
- // and wait for the collection drop to be replicated to a majority of the nodes.
- assert(db.coll.drop());
- },
- performOp: function(db) {
- assert.commandWorked(db.dropDatabase({w: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['otherDoesNotExist'],
+ blockedCollections: [],
+ unblockedCollections: ['coll', 'other'],
+ },
+ dropDB: {
+ prepare: function(db) {
+ assert.writeOK(db.coll.insert({_id: 1}));
+ // Drop collection explicitly during the preparation phase while we are still able
+ // to write to a majority. Otherwise, dropDatabase() will drop the collection
+ // and wait for the collection drop to be replicated to a majority of the nodes.
+ assert(db.coll.drop());
},
- renameCollectionToNewName: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.from.insert({_id: 1}));
- },
- performOp: function(db) {
- assert.commandWorked(db.from.renameCollection('coll'));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other', 'from' /*doesNotExist*/],
+ performOp: function(db) {
+ assert.commandWorked(db.dropDatabase({w: 1}));
},
- renameCollectionToExistingName: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.from.insert({_id: 'from'}));
- assert.writeOK(db.coll.insert({_id: 'coll'}));
- },
- performOp: function(db) {
- assert.commandWorked(db.from.renameCollection('coll', true));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other', 'from' /*doesNotExist*/],
+ blockedCollections: [],
+ unblockedCollections: ['coll'],
+ },
+ dropAndRecreateCollection: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
},
- createIndexForeground: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- performOp: function(db) {
- assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: false}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
+ performOp: function(db) {
+ assert(db.coll.drop());
+ assert.writeOK(db.coll.insert({_id: 1}));
},
- createIndexBackground: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- },
- performOp: function(db) {
- assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: true}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ },
+ dropAndRecreateDB: {
+ prepare: function(db) {
+ assert.writeOK(db.coll.insert({_id: 1}));
+ // Drop collection explicitly during the preparation phase while we are still able
+ // to write to a majority. Otherwise, dropDatabase() will drop the collection
+ // and wait for the collection drop to be replicated to a majority of the nodes.
+ assert(db.coll.drop());
},
- dropIndex: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- assert.commandWorked(db.coll.ensureIndex({x: 1}));
- },
- performOp: function(db) {
- assert.commandWorked(db.coll.dropIndex({x: 1}));
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
+ performOp: function(db) {
+ assert.commandWorked(db.dropDatabase({w: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
},
-
- // Remaining cases are local-only operations.
- reIndex: {
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- assert.commandWorked(db.coll.ensureIndex({x: 1}));
- },
- performOp: function(db) {
- assert.commandWorked(db.coll.reIndex());
- },
- blockedCollections: ['coll'],
- unblockedCollections: ['other'],
- localOnly: true,
+ blockedCollections: ['coll'],
+ unblockedCollections: ['otherDoesNotExist'],
+ },
+ renameCollectionToNewName: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.from.insert({_id: 1}));
+ },
+ performOp: function(db) {
+ assert.commandWorked(db.from.renameCollection('coll'));
+ },
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other', 'from' /*doesNotExist*/],
+ },
+ renameCollectionToExistingName: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.from.insert({_id: 'from'}));
+ assert.writeOK(db.coll.insert({_id: 'coll'}));
+ },
+ performOp: function(db) {
+ assert.commandWorked(db.from.renameCollection('coll', true));
+ },
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other', 'from' /*doesNotExist*/],
+ },
+ createIndexForeground: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
+ },
+ performOp: function(db) {
+ assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: false}));
},
- compact: {
-        // At least on WiredTiger, compact is fully in-place, so it doesn't need to block readers.
- prepare: function(db) {
- assert.writeOK(db.other.insert({_id: 1}));
- assert.writeOK(db.coll.insert({_id: 1}));
- assert.commandWorked(db.coll.ensureIndex({x: 1}));
- },
- performOp: function(db) {
- var res = db.coll.runCommand('compact', {force: true});
- if (res.code != ErrorCodes.CommandNotSupported) {
- // It is fine for a storage engine to support snapshots but not compact. Since
- // compact doesn't block any collections we are fine with doing a no-op here.
- // Other errors should fail the test.
- assert.commandWorked(res);
- }
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ },
+ createIndexBackground: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
+ },
+ performOp: function(db) {
+ assert.commandWorked(db.coll.ensureIndex({x: 1}, {background: true}));
+ },
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ },
+ dropIndex: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.ensureIndex({x: 1}));
+ },
+ performOp: function(db) {
+ assert.commandWorked(db.coll.dropIndex({x: 1}));
+ },
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ },
- },
- blockedCollections: [],
- unblockedCollections: ['coll', 'other'],
- localOnly: true,
+ // Remaining cases are local-only operations.
+ reIndex: {
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.ensureIndex({x: 1}));
},
- };
+ performOp: function(db) {
+ assert.commandWorked(db.coll.reIndex());
+ },
+ blockedCollections: ['coll'],
+ unblockedCollections: ['other'],
+ localOnly: true,
+ },
+ compact: {
+        // At least on WiredTiger, compact is fully in-place, so it doesn't need to block readers.
+ prepare: function(db) {
+ assert.writeOK(db.other.insert({_id: 1}));
+ assert.writeOK(db.coll.insert({_id: 1}));
+ assert.commandWorked(db.coll.ensureIndex({x: 1}));
+ },
+ performOp: function(db) {
+ var res = db.coll.runCommand('compact', {force: true});
+ if (res.code != ErrorCodes.CommandNotSupported) {
+ // It is fine for a storage engine to support snapshots but not compact. Since
+ // compact doesn't block any collections we are fine with doing a no-op here.
+ // Other errors should fail the test.
+ assert.commandWorked(res);
+ }
+ },
+ blockedCollections: [],
+ unblockedCollections: ['coll', 'other'],
+ localOnly: true,
+ },
+};
- // Assertion helpers. These must get all state as arguments rather than through closure since
- // they may be passed in to a ScopedThread.
- function assertReadsBlock(coll) {
- var res =
- coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 5000});
- assert.commandFailedWithCode(res,
- ErrorCodes.MaxTimeMSExpired,
- "Expected read of " + coll.getFullName() + " to block");
- }
+// Assertion helpers. These must get all state as arguments rather than through closure since
+// they may be passed in to a ScopedThread.
+function assertReadsBlock(coll) {
+ var res = coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 5000});
+ assert.commandFailedWithCode(
+ res, ErrorCodes.MaxTimeMSExpired, "Expected read of " + coll.getFullName() + " to block");
+}
- function assertReadsSucceed(coll, timeoutMs = 20000) {
- var res =
- coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": timeoutMs});
- assert.commandWorked(res, 'reading from ' + coll.getFullName());
- // Exhaust the cursor to avoid leaking cursors on the server.
- new DBCommandCursor(coll.getDB(), res).itcount();
- }
+function assertReadsSucceed(coll, timeoutMs = 20000) {
+ var res =
+ coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": timeoutMs});
+ assert.commandWorked(res, 'reading from ' + coll.getFullName());
+ // Exhaust the cursor to avoid leaking cursors on the server.
+ new DBCommandCursor(coll.getDB(), res).itcount();
+}
- // Set up a set and grab things for later.
- var name = "read_committed_with_catalog_changes";
- var replTest =
- new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
+// Set up a set and grab things for later.
+var name = "read_committed_with_catalog_changes";
+var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ replTest.stopSet();
+ return;
+}
- var nodes = replTest.nodeList();
- var config = {
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], priority: 0},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- };
+var nodes = replTest.nodeList();
+var config = {
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+};
- replTest.initiate(config);
+replTest.initiate(config);
- // Get connections.
- var primary = replTest.getPrimary();
- var secondary = replTest._slaves[0];
+// Get connections.
+var primary = replTest.getPrimary();
+var secondary = replTest._slaves[0];
- // This is the DB that all of the tests will use.
- var mainDB = primary.getDB('mainDB');
+// This is the DB that all of the tests will use.
+var mainDB = primary.getDB('mainDB');
- // This DB won't be used by any tests so it should always be unblocked.
- var otherDB = primary.getDB('otherDB');
- var otherDBCollection = otherDB.collection;
- assert.writeOK(otherDBCollection.insert(
- {}, {writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- assertReadsSucceed(otherDBCollection);
+// This DB won't be used by any tests so it should always be unblocked.
+var otherDB = primary.getDB('otherDB');
+var otherDBCollection = otherDB.collection;
+assert.writeOK(otherDBCollection.insert(
+ {}, {writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+assertReadsSucceed(otherDBCollection);
- for (var testName in testCases) {
- jsTestLog('Running test ' + testName);
- var test = testCases[testName];
+for (var testName in testCases) {
+ jsTestLog('Running test ' + testName);
+ var test = testCases[testName];
- const setUpInitialState = function setUpInitialState() {
- assert.commandWorked(mainDB.dropDatabase());
- test.prepare(mainDB);
- replTest.awaitReplication();
- // Do some sanity checks.
- assertReadsSucceed(otherDBCollection);
- test.blockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
- test.unblockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
- };
-
- // All operations, whether replicated or not, must become visible automatically as long as
- // the secondary is keeping up.
- setUpInitialState();
- test.performOp(mainDB);
+ const setUpInitialState = function setUpInitialState() {
+ assert.commandWorked(mainDB.dropDatabase());
+ test.prepare(mainDB);
+ replTest.awaitReplication();
+ // Do some sanity checks.
assertReadsSucceed(otherDBCollection);
test.blockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
test.unblockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
+ };
- // Return to the initial state, then stop the secondary from applying new writes to prevent
- // them from becoming committed.
- setUpInitialState();
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+ // All operations, whether replicated or not, must become visible automatically as long as
+ // the secondary is keeping up.
+ setUpInitialState();
+ test.performOp(mainDB);
+ assertReadsSucceed(otherDBCollection);
+ test.blockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
+ test.unblockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
- // If the tested operation isn't replicated, do a write to the side collection before
- // performing the operation. This will ensure that the operation happens after an
-        // uncommitted write, which prevents it from immediately being marked as committed.
- if (test.localOnly) {
- assert.writeOK(otherDBCollection.insert({}));
- }
+ // Return to the initial state, then stop the secondary from applying new writes to prevent
+ // them from becoming committed.
+ setUpInitialState();
+ assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- // Perform the op and ensure that blocked collections block and unblocked ones don't.
- test.performOp(mainDB);
- assertReadsSucceed(otherDBCollection);
- test.blockedCollections.forEach((name) => assertReadsBlock(mainDB[name]));
- test.unblockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
+ // If the tested operation isn't replicated, do a write to the side collection before
+ // performing the operation. This will ensure that the operation happens after an
+    // uncommitted write, which prevents it from immediately being marked as committed.
+ if (test.localOnly) {
+ assert.writeOK(otherDBCollection.insert({}));
+ }
- // Use background threads to test that reads that start blocked can complete if the
- // operation they are waiting on becomes committed while the read is still blocked.
-        // We don't do this when testing auth because ScopedThreads don't propagate auth
- // credentials.
- var threads = jsTest.options().auth ? [] : test.blockedCollections.map((name) => {
- // This function must get all inputs as arguments and can't use closure because it
- // is used in a ScopedThread.
- function bgThread(host, collection, assertReadsSucceed) {
- // Use a longer timeout since we expect to block for a little while (at least 2
- // seconds).
- assertReadsSucceed(new Mongo(host).getCollection(collection), 30 * 1000);
- }
- var thread = new ScopedThread(
- bgThread, primary.host, mainDB[name].getFullName(), assertReadsSucceed);
- thread.start();
- return thread;
- });
- sleep(1000); // Give the reads a chance to block.
+ // Perform the op and ensure that blocked collections block and unblocked ones don't.
+ test.performOp(mainDB);
+ assertReadsSucceed(otherDBCollection);
+ test.blockedCollections.forEach((name) => assertReadsBlock(mainDB[name]));
+ test.unblockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
- try {
- // Try the committed read again after sleeping to ensure that it still blocks even if it
- // isn't immediately after the operation.
- test.blockedCollections.forEach((name) => assertReadsBlock(mainDB[name]));
+ // Use background threads to test that reads that start blocked can complete if the
+ // operation they are waiting on becomes committed while the read is still blocked.
+    // We don't do this when testing auth because ScopedThreads don't propagate auth
+ // credentials.
+ var threads = jsTest.options().auth ? [] : test.blockedCollections.map((name) => {
+ // This function must get all inputs as arguments and can't use closure because it
+ // is used in a ScopedThread.
+ function bgThread(host, collection, assertReadsSucceed) {
+ // Use a longer timeout since we expect to block for a little while (at least 2
+ // seconds).
+ assertReadsSucceed(new Mongo(host).getCollection(collection), 30 * 1000);
+ }
+ var thread = new ScopedThread(
+ bgThread, primary.host, mainDB[name].getFullName(), assertReadsSucceed);
+ thread.start();
+ return thread;
+ });
+ sleep(1000); // Give the reads a chance to block.
- // Restart oplog application on the secondary and ensure the blocked collections become
- // unblocked.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
- replTest.awaitReplication();
- test.blockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
+ try {
+ // Try the committed read again after sleeping to ensure that it still blocks even if it
+ // isn't immediately after the operation.
+ test.blockedCollections.forEach((name) => assertReadsBlock(mainDB[name]));
- // Wait for the threads to complete and report any errors encountered from running them.
- threads.forEach((thread) => {
- thread.join();
- thread.join = () => {}; // Make join a no-op for the finally below.
- assert(!thread.hasFailed(), "One of the threads failed. See above for details.");
- });
- } finally {
- // Make sure we wait for all threads to finish.
- threads.forEach(thread => thread.join());
- }
+ // Restart oplog application on the secondary and ensure the blocked collections become
+ // unblocked.
+ assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
+ replTest.awaitReplication();
+ test.blockedCollections.forEach((name) => assertReadsSucceed(mainDB[name]));
+
+ // Wait for the threads to complete and report any errors encountered from running them.
+ threads.forEach((thread) => {
+ thread.join();
+ thread.join = () => {}; // Make join a no-op for the finally below.
+ assert(!thread.hasFailed(), "One of the threads failed. See above for details.");
+ });
+ } finally {
+ // Make sure we wait for all threads to finish.
+ threads.forEach(thread => thread.join());
}
+}
- replTest.stopSet();
+replTest.stopSet();
}());
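The assertReadsBlock/assertReadsSucceed helpers above reduce to a find with readConcern 'majority' and a bounded maxTimeMS. A condensed sketch of both outcomes, assuming a collection 'coll' whose latest write is not yet majority-committed:

    // While the write is uncommitted, a majority read times out.
    var res = db.coll.runCommand('find',
                                 {readConcern: {level: 'majority'}, maxTimeMS: 5000});
    assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);

    // After the write commits, the same read succeeds; exhaust the cursor so
    // it is not leaked on the server.
    res = db.coll.runCommand('find',
                             {readConcern: {level: 'majority'}, maxTimeMS: 20000});
    assert.commandWorked(res);
    new DBCommandCursor(db, res).itcount();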
diff --git a/jstests/replsets/read_concern_majority_getmore_secondaries.js b/jstests/replsets/read_concern_majority_getmore_secondaries.js
index 6db3658733e..5d6624d2f37 100644
--- a/jstests/replsets/read_concern_majority_getmore_secondaries.js
+++ b/jstests/replsets/read_concern_majority_getmore_secondaries.js
@@ -1,80 +1,78 @@
// Test that getMore for a majority read on a secondary only reads committed data.
// @tags: [requires_majority_read_concern]
(function() {
- "use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+"use strict";
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const name = "read_concern_majority_getmore_secondaries";
- const replSet = new ReplSetTest({
- name: name,
- nodes:
- [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
- settings: {chainingAllowed: false}
- });
- replSet.startSet();
- replSet.initiate();
+const name = "read_concern_majority_getmore_secondaries";
+const replSet = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ settings: {chainingAllowed: false}
+});
+replSet.startSet();
+replSet.initiate();
- function stopDataReplication(node) {
- jsTest.log("Stop data replication on " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
- }
+function stopDataReplication(node) {
+ jsTest.log("Stop data replication on " + node.host);
+ assert.commandWorked(
+ node.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}));
+}
- function startDataReplication(node) {
- jsTest.log("Start data replication on " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
- }
+function startDataReplication(node) {
+ jsTest.log("Start data replication on " + node.host);
+ assert.commandWorked(node.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}));
+}
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const primary = replSet.getPrimary();
- const secondaries = replSet.getSecondaries();
- const secondary = secondaries[0];
+const primary = replSet.getPrimary();
+const secondaries = replSet.getSecondaries();
+const secondary = secondaries[0];
- const primaryDB = primary.getDB(dbName);
- const secondaryDB = secondary.getDB(dbName);
+const primaryDB = primary.getDB(dbName);
+const secondaryDB = secondary.getDB(dbName);
- // Insert data on primary and allow it to become committed.
- for (let i = 0; i < 4; i++) {
- assert.commandWorked(primaryDB[collName].insert({_id: i}));
- }
+// Insert data on primary and allow it to become committed.
+for (let i = 0; i < 4; i++) {
+ assert.commandWorked(primaryDB[collName].insert({_id: i}));
+}
- // Await commit.
- replSet.awaitReplication();
- replSet.awaitLastOpCommitted();
+// Await commit.
+replSet.awaitReplication();
+replSet.awaitLastOpCommitted();
- // Stop data replication on 2 secondaries to prevent writes being committed.
- stopDataReplication(secondaries[1]);
- stopDataReplication(secondaries[2]);
+// Stop data replication on 2 secondaries to prevent writes being committed.
+stopDataReplication(secondaries[1]);
+stopDataReplication(secondaries[2]);
- // Write more data to primary.
- for (let i = 4; i < 8; i++) {
- assert.commandWorked(primaryDB[collName].insert({_id: i}, {writeConcern: {w: 2}}));
- }
+// Write more data to primary.
+for (let i = 4; i < 8; i++) {
+ assert.commandWorked(primaryDB[collName].insert({_id: i}, {writeConcern: {w: 2}}));
+}
- // Check that it reached the secondary.
- assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}, {_id: 7}],
- secondaryDB[collName].find().sort({_id: 1}).toArray());
+// Check that it reached the secondary.
+assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}, {_id: 7}],
+ secondaryDB[collName].find().sort({_id: 1}).toArray());
- // It is important that this query does not do an in-memory sort. Otherwise the initial find
- // will consume all of the results from the storage engine in order to sort them, so we will not
- // be testing that the getMore does not read uncommitted data from the storage engine.
- let res = primaryDB[collName].find().sort({_id: 1}).batchSize(2).readConcern("majority");
- assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}], res.toArray());
+// It is important that this query does not do an in-memory sort. Otherwise the initial find
+// will consume all of the results from the storage engine in order to sort them, so we will not
+// be testing that the getMore does not read uncommitted data from the storage engine.
+let res = primaryDB[collName].find().sort({_id: 1}).batchSize(2).readConcern("majority");
+assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}], res.toArray());
- // Similarly, this query must not do an in-memory sort.
- res = secondaryDB[collName].find().sort({_id: 1}).batchSize(2).readConcern("majority");
- assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}], res.toArray());
+// Similarly, this query must not do an in-memory sort.
+res = secondaryDB[collName].find().sort({_id: 1}).batchSize(2).readConcern("majority");
+assert.docEq([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}], res.toArray());
- // Disable failpoints and shutdown.
- replSet.getSecondaries().forEach(startDataReplication);
- replSet.stopSet();
+// Disable failpoints and shutdown.
+replSet.getSecondaries().forEach(startDataReplication);
+replSet.stopSet();
}());
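The in-memory-sort caveat can be checked directly: sorting on _id should be satisfied by the _id index, so the winning plan contains no SORT stage. A hedged sketch using explain (it only walks simple linear plans, which is all this test produces):

    // Walk the winning plan and assert the sort is satisfied by the index.
    var winningPlan = db.coll.find().sort({_id: 1}).explain().queryPlanner.winningPlan;
    for (var stage = winningPlan; stage; stage = stage.inputStage) {
        assert.neq('SORT', stage.stage, tojson(winningPlan));
    }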
diff --git a/jstests/replsets/read_concern_uninitated_set.js b/jstests/replsets/read_concern_uninitated_set.js
index 52a12a16def..ce9c015cb62 100644
--- a/jstests/replsets/read_concern_uninitated_set.js
+++ b/jstests/replsets/read_concern_uninitated_set.js
@@ -5,58 +5,57 @@
* @tags: [requires_persistence, requires_majority_read_concern]
*/
(function() {
- "use strict";
- // For supportsMajorityReadConcern().
- load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+"use strict";
+// For supportsMajorityReadConcern().
+load("jstests/multiVersion/libs/causal_consistency_helpers.js");
- if (!supportsMajorityReadConcern()) {
- jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
- return;
- }
+if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+}
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- const localDB = rst.nodes[0].getDB('local');
- assert.commandWorked(localDB.test.insert({_id: 0}));
- assert.commandWorked(localDB.runCommand({
- isMaster: 1,
- "$clusterTime": {
- "clusterTime": Timestamp(1, 1),
- "signature":
- {"hash": BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="), "keyId": NumberLong(0)}
- }
- }));
- jsTestLog("Local readConcern on local database should work.");
- const res = assert.commandWorked(localDB.runCommand(
- {find: "test", filter: {}, maxTimeMS: 60000, readConcern: {level: "local"}}));
- assert.eq([{_id: 0}], res.cursor.firstBatch);
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+const localDB = rst.nodes[0].getDB('local');
+assert.commandWorked(localDB.test.insert({_id: 0}));
+assert.commandWorked(localDB.runCommand({
+ isMaster: 1,
+ "$clusterTime": {
+ "clusterTime": Timestamp(1, 1),
+ "signature": {"hash": BinData(0, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="), "keyId": NumberLong(0)}
+ }
+}));
+jsTestLog("Local readConcern on local database should work.");
+const res = assert.commandWorked(localDB.runCommand(
+ {find: "test", filter: {}, maxTimeMS: 60000, readConcern: {level: "local"}}));
+assert.eq([{_id: 0}], res.cursor.firstBatch);
- jsTestLog("Majority readConcern should fail with NotYetInitialized.");
- assert.commandFailedWithCode(
- localDB.runCommand(
- {find: "test", filter: {}, maxTimeMS: 60000, readConcern: {level: "majority"}}),
- ErrorCodes.NotYetInitialized);
+jsTestLog("Majority readConcern should fail with NotYetInitialized.");
+assert.commandFailedWithCode(
+ localDB.runCommand(
+ {find: "test", filter: {}, maxTimeMS: 60000, readConcern: {level: "majority"}}),
+ ErrorCodes.NotYetInitialized);
- jsTestLog("afterClusterTime readConcern should fail with NotYetInitialized.");
- assert.commandFailedWithCode(localDB.runCommand({
- find: "test",
- filter: {},
- maxTimeMS: 60000,
- readConcern: {afterClusterTime: Timestamp(1, 1)}
- }),
- ErrorCodes.NotYetInitialized);
+jsTestLog("afterClusterTime readConcern should fail with NotYetInitialized.");
+assert.commandFailedWithCode(localDB.runCommand({
+ find: "test",
+ filter: {},
+ maxTimeMS: 60000,
+ readConcern: {afterClusterTime: Timestamp(1, 1)}
+}),
+ ErrorCodes.NotYetInitialized);
- jsTestLog("oplog query should fail with NotYetInitialized.");
- assert.commandFailedWithCode(localDB.runCommand({
- find: "oplog.rs",
- filter: {ts: {$gte: Timestamp(1520004466, 2)}},
- tailable: true,
- awaitData: true,
- maxTimeMS: 60000,
- batchSize: 13981010,
- term: 1,
- readConcern: {afterClusterTime: Timestamp(1, 1)}
- }),
- ErrorCodes.NotYetInitialized);
- rst.stopSet();
+jsTestLog("oplog query should fail with NotYetInitialized.");
+assert.commandFailedWithCode(localDB.runCommand({
+ find: "oplog.rs",
+ filter: {ts: {$gte: Timestamp(1520004466, 2)}},
+ tailable: true,
+ awaitData: true,
+ maxTimeMS: 60000,
+ batchSize: 13981010,
+ term: 1,
+ readConcern: {afterClusterTime: Timestamp(1, 1)}
+}),
+ ErrorCodes.NotYetInitialized);
+rst.stopSet();
}());
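All three negative cases above rely on assert.commandFailedWithCode, which checks both that the command failed and that it failed with the expected code. Spelled out by hand, the check amounts to the following sketch (not the shell's actual internals):

    // Equivalent of assert.commandFailedWithCode(res, ErrorCodes.NotYetInitialized):
    var res = localDB.runCommand(
        {find: "test", filter: {}, readConcern: {level: "majority"}});
    assert.eq(0, res.ok, tojson(res));
    assert.eq(ErrorCodes.NotYetInitialized, res.code, tojson(res));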
diff --git a/jstests/replsets/read_majority_two_arbs.js b/jstests/replsets/read_majority_two_arbs.js
index d2aeea89280..f49ebe71dd7 100644
--- a/jstests/replsets/read_majority_two_arbs.js
+++ b/jstests/replsets/read_majority_two_arbs.js
@@ -6,68 +6,67 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
- "use strict";
+"use strict";
- function log(arg) {
- jsTest.log(tojson(arg));
- }
+function log(arg) {
+ jsTest.log(tojson(arg));
+}
- // Set up a set and grab things for later.
- var name = "read_majority_two_arbs";
- var replTest =
- new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
+// Set up a set and grab things for later.
+var name = "read_majority_two_arbs";
+var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
- if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- replTest.stopSet();
- return;
- }
+if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ replTest.stopSet();
+ return;
+}
- var nodes = replTest.nodeList();
- var config = {
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], arbiterOnly: true},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- };
+var nodes = replTest.nodeList();
+var config = {
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], arbiterOnly: true},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+};
- replTest.initiate(config);
+replTest.initiate(config);
- var primary = replTest.getPrimary();
- var db = primary.getDB(name);
- var t = db[name];
+var primary = replTest.getPrimary();
+var db = primary.getDB(name);
+var t = db[name];
- function doRead(readConcern) {
- readConcern.maxTimeMS = 3000;
- var res = assert.commandWorked(t.runCommand('find', readConcern));
- var docs = (new DBCommandCursor(db, res)).toArray();
- assert.gt(docs.length, 0, "no docs returned!");
- return docs[0].state;
- }
+function doRead(readConcern) {
+ readConcern.maxTimeMS = 3000;
+ var res = assert.commandWorked(t.runCommand('find', readConcern));
+ var docs = (new DBCommandCursor(db, res)).toArray();
+ assert.gt(docs.length, 0, "no docs returned!");
+ return docs[0].state;
+}
- function doDirtyRead() {
- log("doing dirty read");
- var ret = doRead({"readConcern": {"level": "local"}});
- log("done doing dirty read.");
- return ret;
- }
+function doDirtyRead() {
+ log("doing dirty read");
+ var ret = doRead({"readConcern": {"level": "local"}});
+ log("done doing dirty read.");
+ return ret;
+}
- function doCommittedRead() {
- log("doing committed read");
- var ret = doRead({"readConcern": {"level": "majority"}});
- log("done doing committed read.");
- return ret;
- }
+function doCommittedRead() {
+ log("doing committed read");
+ var ret = doRead({"readConcern": {"level": "majority"}});
+ log("done doing committed read.");
+ return ret;
+}
- jsTest.log("doing write");
- assert.writeOK(
- t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10 * 1000}}));
- jsTest.log("doing read");
- assert.eq(doDirtyRead(), 0);
- jsTest.log("doing committed read");
- assert.eq(doCommittedRead(), 0);
- jsTest.log("stopping replTest; test completed successfully");
- replTest.stopSet();
+jsTest.log("doing write");
+assert.writeOK(t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10 * 1000}}));
+jsTest.log("doing read");
+assert.eq(doDirtyRead(), 0);
+jsTest.log("doing committed read");
+assert.eq(doCommittedRead(), 0);
+jsTest.log("stopping replTest; test completed successfully");
+replTest.stopSet();
}());
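Note that doRead's parameter is really a set of extra find-command fields (the maxTimeMS assignment merges into the command body), since the shell's collection-level runCommand folds the options object into the find command. The equivalent explicit call, as a sketch:

    // doRead({"readConcern": {"level": "majority"}}) spelled out explicitly.
    var res = assert.commandWorked(
        t.runCommand('find', {readConcern: {level: 'majority'}, maxTimeMS: 3000}));
    var docs = new DBCommandCursor(db, res).toArray();
    assert.gt(docs.length, 0, "no docs returned!");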
diff --git a/jstests/replsets/read_operations_during_rollback.js b/jstests/replsets/read_operations_during_rollback.js
index 18cd188921c..bab24b6e477 100644
--- a/jstests/replsets/read_operations_during_rollback.js
+++ b/jstests/replsets/read_operations_during_rollback.js
@@ -2,99 +2,95 @@
* This test makes sure 'find' and 'getMore' commands fail correctly during rollback.
*/
(function() {
- "use strict";
-
- load("jstests/replsets/libs/rollback_test.js");
-
- const dbName = "test";
- const collName = "coll";
-
- let setFailPoint = (node, failpoint) => {
- jsTestLog("Setting fail point " + failpoint);
- assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
- };
-
- let clearFailPoint = (node, failpoint) => {
- jsTestLog("Clearing fail point " + failpoint);
- assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: "off"}));
- };
-
- // Set up Rollback Test.
- let rollbackTest = new RollbackTest();
-
- // Insert a document to be read later.
- assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert({}));
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
-
- setFailPoint(rollbackNode, "rollbackHangAfterTransitionToRollback");
-
- setFailPoint(rollbackNode, "GetMoreHangBeforeReadLock");
-
- const joinGetMoreThread = startParallelShell(() => {
- db.getMongo().setSlaveOk();
- const cursorID =
- assert.commandWorked(db.runCommand({"find": "coll", batchSize: 0})).cursor.id;
- // Make sure an outstanding read operation gets killed during rollback even though the read
- // was started before rollback. Outstanding read operations are killed during rollback and
-        // their connections are closed shortly after. So we would get either an
-        // InterruptedDueToReplStateChange error, if the error reply is sent and received before
-        // the connection is closed, or a network error exception.
- try {
- assert.commandFailedWithCode(db.runCommand({"getMore": cursorID, collection: "coll"}),
- ErrorCodes.InterruptedDueToReplStateChange);
- } catch (e) {
- assert.includes(e.toString(), "network error while attempting to run command");
- }
- }, rollbackNode.port);
-
- const cursorIdToBeReadDuringRollback =
- assert
- .commandWorked(rollbackNode.getDB(dbName).runCommand({"find": collName, batchSize: 0}))
- .cursor.id;
-
- // Wait for 'getMore' to hang.
- checkLog.contains(rollbackNode, "GetMoreHangBeforeReadLock fail point enabled.");
-
- // Start rollback.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-
- jsTestLog("Reconnecting to " + rollbackNode.host + " after rollback");
- reconnect(rollbackNode.getDB(dbName));
-
- // Wait for rollback to hang.
- checkLog.contains(rollbackNode, "rollbackHangAfterTransitionToRollback fail point enabled.");
-
- clearFailPoint(rollbackNode, "GetMoreHangBeforeReadLock");
-
- jsTestLog("Wait for 'getMore' thread to join.");
- joinGetMoreThread();
-
- jsTestLog("Reading during rollback.");
- // Make sure that read operations fail during rollback.
- assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand({"find": collName}),
- ErrorCodes.NotMasterOrSecondary);
- assert.commandFailedWithCode(
- rollbackNode.getDB(dbName).runCommand(
- {"getMore": cursorIdToBeReadDuringRollback, collection: collName}),
- ErrorCodes.NotMasterOrSecondary);
-
- // Disable the best-effort check for primary-ness in the service entry point, so that we
- // exercise the real check for primary-ness in 'find' and 'getMore' commands.
- setFailPoint(rollbackNode, "skipCheckingForNotMasterInCommandDispatch");
- jsTestLog("Reading during rollback (again with command dispatch checks disabled).");
- assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand({"find": collName}),
- ErrorCodes.NotMasterOrSecondary);
- assert.commandFailedWithCode(
- rollbackNode.getDB(dbName).runCommand(
- {"getMore": cursorIdToBeReadDuringRollback, collection: collName}),
- ErrorCodes.NotMasterOrSecondary);
-
- clearFailPoint(rollbackNode, "rollbackHangAfterTransitionToRollback");
-
- rollbackTest.transitionToSteadyStateOperations();
-
- // Check the replica set.
- rollbackTest.stop();
+"use strict";
+
+load("jstests/replsets/libs/rollback_test.js");
+
+const dbName = "test";
+const collName = "coll";
+
+let setFailPoint = (node, failpoint) => {
+ jsTestLog("Setting fail point " + failpoint);
+ assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: "alwaysOn"}));
+};
+
+let clearFailPoint = (node, failpoint) => {
+ jsTestLog("Clearing fail point " + failpoint);
+ assert.commandWorked(node.adminCommand({configureFailPoint: failpoint, mode: "off"}));
+};
+
+// Set up Rollback Test.
+let rollbackTest = new RollbackTest();
+
+// Insert a document to be read later.
+assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert({}));
+
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+
+setFailPoint(rollbackNode, "rollbackHangAfterTransitionToRollback");
+
+setFailPoint(rollbackNode, "GetMoreHangBeforeReadLock");
+
+const joinGetMoreThread = startParallelShell(() => {
+ db.getMongo().setSlaveOk();
+ const cursorID = assert.commandWorked(db.runCommand({"find": "coll", batchSize: 0})).cursor.id;
+ // Make sure an outstanding read operation gets killed during rollback even though the read
+ // was started before rollback. Outstanding read operations are killed during rollback and
+    // their connections are closed shortly after. So we would get either an
+    // InterruptedDueToReplStateChange error, if the error reply is sent and received before
+    // the connection is closed, or a network error exception.
+ try {
+ assert.commandFailedWithCode(db.runCommand({"getMore": cursorID, collection: "coll"}),
+ ErrorCodes.InterruptedDueToReplStateChange);
+ } catch (e) {
+ assert.includes(e.toString(), "network error while attempting to run command");
+ }
+}, rollbackNode.port);
+
+const cursorIdToBeReadDuringRollback =
+ assert.commandWorked(rollbackNode.getDB(dbName).runCommand({"find": collName, batchSize: 0}))
+ .cursor.id;
+
+// Wait for 'getMore' to hang.
+checkLog.contains(rollbackNode, "GetMoreHangBeforeReadLock fail point enabled.");
+
+// Start rollback.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+
+jsTestLog("Reconnecting to " + rollbackNode.host + " after rollback");
+reconnect(rollbackNode.getDB(dbName));
+
+// Wait for rollback to hang.
+checkLog.contains(rollbackNode, "rollbackHangAfterTransitionToRollback fail point enabled.");
+
+clearFailPoint(rollbackNode, "GetMoreHangBeforeReadLock");
+
+jsTestLog("Wait for 'getMore' thread to join.");
+joinGetMoreThread();
+
+jsTestLog("Reading during rollback.");
+// Make sure that read operations fail during rollback.
+assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand({"find": collName}),
+ ErrorCodes.NotMasterOrSecondary);
+assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand(
+ {"getMore": cursorIdToBeReadDuringRollback, collection: collName}),
+ ErrorCodes.NotMasterOrSecondary);
+
+// Disable the best-effort check for primary-ness in the service entry point, so that we
+// exercise the real check for primary-ness in 'find' and 'getMore' commands.
+setFailPoint(rollbackNode, "skipCheckingForNotMasterInCommandDispatch");
+jsTestLog("Reading during rollback (again with command dispatch checks disabled).");
+assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand({"find": collName}),
+ ErrorCodes.NotMasterOrSecondary);
+assert.commandFailedWithCode(rollbackNode.getDB(dbName).runCommand(
+ {"getMore": cursorIdToBeReadDuringRollback, collection: collName}),
+ ErrorCodes.NotMasterOrSecondary);
+
+clearFailPoint(rollbackNode, "rollbackHangAfterTransitionToRollback");
+
+rollbackTest.transitionToSteadyStateOperations();
+
+// Check the replica set.
+rollbackTest.stop();
}());
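The try/catch around the parallel shell's getMore generalizes to any command racing with connection teardown: tolerate either the specific server error or the shell's network error. A hedged helper sketch ('runExpectingInterruptOrDisconnect' is hypothetical, not part of the test libraries):

    // Accept either the expected error code or a network error from a
    // connection that was closed while the command was in flight.
    function runExpectingInterruptOrDisconnect(conn, dbName, cmd) {
        try {
            assert.commandFailedWithCode(conn.getDB(dbName).runCommand(cmd),
                                         ErrorCodes.InterruptedDueToReplStateChange);
        } catch (e) {
            // The shell surfaces a closed connection as a generic network error.
            assert.includes(e.toString(), "network error");
        }
    }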
diff --git a/jstests/replsets/read_operations_during_step_down.js b/jstests/replsets/read_operations_during_step_down.js
index 4f9507ff902..667e353d2fe 100644
--- a/jstests/replsets/read_operations_during_step_down.js
+++ b/jstests/replsets/read_operations_during_step_down.js
@@ -8,118 +8,116 @@ load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
(function() {
- "use strict";
+"use strict";
- const testName = "readOpsDuringStepDown";
- const dbName = "test";
- const collName = "coll";
+const testName = "readOpsDuringStepDown";
+const dbName = "test";
+const collName = "coll";
- var rst = new ReplSetTest({name: testName, nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+var rst = new ReplSetTest({name: testName, nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const primaryAdmin = primary.getDB("admin");
- const primaryColl = primaryDB[collName];
- const collNss = primaryColl.getFullName();
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryAdmin = primary.getDB("admin");
+const primaryColl = primaryDB[collName];
+const collNss = primaryColl.getFullName();
- TestData.dbName = dbName;
- TestData.collName = collName;
+TestData.dbName = dbName;
+TestData.collName = collName;
- jsTestLog("1. Do a document write");
- assert.writeOK(
+jsTestLog("1. Do a document write");
+assert.writeOK(
        primaryColl.insert({_id: 0}, {"writeConcern": {"w": "majority"}}));
- rst.awaitReplication();
-
- // Open a cursor on primary.
- const cursorIdToBeReadAfterStepDown =
- assert.commandWorked(primaryDB.runCommand({"find": collName, batchSize: 0})).cursor.id;
-
- jsTestLog("2. Start blocking getMore cmd before step down");
- const joinGetMoreThread = startParallelShell(() => {
- // Open another cursor on primary before step down.
- primaryDB = db.getSiblingDB(TestData.dbName);
- const cursorIdToBeReadDuringStepDown =
- assert.commandWorked(primaryDB.runCommand({"find": TestData.collName, batchSize: 0}))
- .cursor.id;
-
- // Enable the fail point for get more cmd.
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "alwaysOn"}));
-
- getMoreRes = assert.commandWorked(primaryDB.runCommand(
- {"getMore": cursorIdToBeReadDuringStepDown, collection: TestData.collName}));
- assert.docEq([{_id: 0}], getMoreRes.cursor.nextBatch);
- }, primary.port);
-
- // Wait for getmore cmd to reach the fail point.
- waitForCurOpByFailPoint(primaryAdmin, collNss, "waitAfterPinningCursorBeforeGetMoreBatch");
-
-    jsTestLog("3. Start blocking find cmd before step down");
- const joinFindThread = startParallelShell(() => {
- // Enable the fail point for find cmd.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
-
- var findRes = assert.commandWorked(
- db.getSiblingDB(TestData.dbName).runCommand({"find": TestData.collName}));
- assert.docEq([{_id: 0}], findRes.cursor.firstBatch);
-
- }, primary.port);
-
- // Wait for find cmd to reach the fail point.
- waitForCurOpByFailPoint(primaryAdmin, collNss, "waitInFindBeforeMakingBatch");
-
-    jsTestLog("4. Make primary step down");
- const joinStepDownThread = startParallelShell(() => {
- assert.commandWorked(db.adminCommand({"replSetStepDown": 100, "force": true}));
- }, primary.port);
-
- // Wait until the step down has started to kill user operations.
- checkLog.contains(primary, "Starting to kill user operations");
-
- // Enable "waitAfterReadCommandFinishesExecution" fail point to make sure the find and get more
-    // commands on database 'test' do not complete before step down.
- assert.commandWorked(primaryAdmin.runCommand({
- configureFailPoint: "waitAfterReadCommandFinishesExecution",
- data: {db: dbName},
- mode: "alwaysOn"
- }));
-
-    jsTestLog("5. Disable fail points");
- assert.commandWorked(
- primaryAdmin.runCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
- assert.commandWorked(primaryAdmin.runCommand(
- {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}));
-
- // Wait until the primary transitioned to SECONDARY state.
- joinStepDownThread();
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
-    // We don't want to check if we have reached the "waitAfterReadCommandFinishesExecution" fail point
- // because we already know that the primary has stepped down successfully. This implies that
- // the find and get more commands are still running even after the node stepped down.
- assert.commandWorked(primaryAdmin.runCommand(
- {configureFailPoint: "waitAfterReadCommandFinishesExecution", mode: "off"}));
-
-    // Wait for the find and getMore threads to join.
- joinGetMoreThread();
- joinFindThread();
-
-    jsTestLog("6. Start get more cmd after step down");
- var getMoreRes = assert.commandWorked(
- primaryDB.runCommand({"getMore": cursorIdToBeReadAfterStepDown, collection: collName}));
+rst.awaitReplication();
+
+// Open a cursor on primary.
+const cursorIdToBeReadAfterStepDown =
+ assert.commandWorked(primaryDB.runCommand({"find": collName, batchSize: 0})).cursor.id;
+
+jsTestLog("2. Start blocking getMore cmd before step down");
+const joinGetMoreThread = startParallelShell(() => {
+ // Open another cursor on primary before step down.
+ primaryDB = db.getSiblingDB(TestData.dbName);
+ const cursorIdToBeReadDuringStepDown =
+ assert.commandWorked(primaryDB.runCommand({"find": TestData.collName, batchSize: 0}))
+ .cursor.id;
+
+ // Enable the fail point for get more cmd.
+ assert.commandWorked(db.adminCommand(
+ {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "alwaysOn"}));
+
+ getMoreRes = assert.commandWorked(primaryDB.runCommand(
+ {"getMore": cursorIdToBeReadDuringStepDown, collection: TestData.collName}));
assert.docEq([{_id: 0}], getMoreRes.cursor.nextBatch);
+}, primary.port);
- // Validate that no operations got killed on step down and no network disconnection happened due
- // to failed unacknowledged operations.
- let replMetrics =
- assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
- assert.eq(replMetrics.stepDown.userOperationsKilled, 0);
- // Should account for find and getmore commands issued before step down.
- assert.gte(replMetrics.stepDown.userOperationsRunning, 2);
- assert.eq(replMetrics.network.notMasterUnacknowledgedWrites, 0);
+// Wait for getmore cmd to reach the fail point.
+waitForCurOpByFailPoint(primaryAdmin, collNss, "waitAfterPinningCursorBeforeGetMoreBatch");
- rst.stopSet();
+jsTestLog("2. Start blocking find cmd before step down");
+const joinFindThread = startParallelShell(() => {
+ // Enable the fail point for find cmd.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "alwaysOn"}));
+
+ var findRes = assert.commandWorked(
+ db.getSiblingDB(TestData.dbName).runCommand({"find": TestData.collName}));
+ assert.docEq([{_id: 0}], findRes.cursor.firstBatch);
+}, primary.port);
+
+// Wait for find cmd to reach the fail point.
+waitForCurOpByFailPoint(primaryAdmin, collNss, "waitInFindBeforeMakingBatch");
+
+jsTestLog("3. Make primary step down");
+const joinStepDownThread = startParallelShell(() => {
+ assert.commandWorked(db.adminCommand({"replSetStepDown": 100, "force": true}));
+}, primary.port);
+
+// Wait until the step down has started to kill user operations.
+checkLog.contains(primary, "Starting to kill user operations");
+
+// Enable "waitAfterReadCommandFinishesExecution" fail point to make sure the find and get more
+// commands on database 'test' do not complete before step down.
+assert.commandWorked(primaryAdmin.runCommand({
+ configureFailPoint: "waitAfterReadCommandFinishesExecution",
+ data: {db: dbName},
+ mode: "alwaysOn"
+}));
+
+jsTestLog("4. Disable fail points");
+assert.commandWorked(
+ primaryAdmin.runCommand({configureFailPoint: "waitInFindBeforeMakingBatch", mode: "off"}));
+assert.commandWorked(primaryAdmin.runCommand(
+ {configureFailPoint: "waitAfterPinningCursorBeforeGetMoreBatch", mode: "off"}));
+
+// Wait until the primary transitioned to SECONDARY state.
+joinStepDownThread();
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+// We don't want to check if we have reached the "waitAfterReadCommandFinishesExecution" fail point
+// because we already know that the primary has stepped down successfully. This implies that
+// the find and get more commands are still running even after the node stepped down.
+assert.commandWorked(primaryAdmin.runCommand(
+ {configureFailPoint: "waitAfterReadCommandFinishesExecution", mode: "off"}));
+
+// Wait for find & getmore thread to join.
+joinGetMoreThread();
+joinFindThread();
+
+jsTestLog("5. Start get more cmd after step down");
+var getMoreRes = assert.commandWorked(
+ primaryDB.runCommand({"getMore": cursorIdToBeReadAfterStepDown, collection: collName}));
+assert.docEq([{_id: 0}], getMoreRes.cursor.nextBatch);
+
+// Validate that no operations got killed on step down and no network disconnection happened due
+// to failed unacknowledged operations.
+let replMetrics = assert.commandWorked(primaryAdmin.adminCommand({serverStatus: 1})).metrics.repl;
+assert.eq(replMetrics.stepDown.userOperationsKilled, 0);
+// Should account for find and getmore commands issued before step down.
+assert.gte(replMetrics.stepDown.userOperationsRunning, 2);
+assert.eq(replMetrics.network.notMasterUnacknowledgedWrites, 0);
+
+rst.stopSet();
})();
diff --git a/jstests/replsets/reconfig.js b/jstests/replsets/reconfig.js
index 7ae2f41d3c8..f6f83755f1f 100644
--- a/jstests/replsets/reconfig.js
+++ b/jstests/replsets/reconfig.js
@@ -3,46 +3,45 @@
* succeed without force if force is needed.
*/
(function() {
- "use strict";
-
- // Skip db hash check because secondary is left with a different config.
- TestData.skipCheckDBHashes = true;
-
- var numNodes = 5;
- var replTest = new ReplSetTest({name: 'testSet', nodes: numNodes});
- var nodes = replTest.startSet();
- replTest.initiate();
-
- var primary = replTest.getPrimary();
-
- replTest.awaitSecondaryNodes();
-
- jsTestLog("Valid reconfig");
- var config = primary.getDB("local").system.replset.findOne();
- printjson(config);
- config.version++;
- config.members[nodes.indexOf(primary)].priority = 2;
- assert.commandWorked(primary.getDB("admin").runCommand({replSetReconfig: config}));
- replTest.awaitReplication();
-
- jsTestLog("Invalid reconfig");
- config.version++;
- var badMember = {_id: numNodes, host: "localhost:12345", priority: "High"};
- config.members.push(badMember);
- var invalidConfigCode = 93;
- assert.commandFailedWithCode(primary.adminCommand({replSetReconfig: config}),
- invalidConfigCode);
-
- jsTestLog("No force when needed.");
- config.members = config.members.slice(0, numNodes - 1);
- var secondary = replTest.getSecondary();
- config.members[nodes.indexOf(secondary)].priority = 5;
- var admin = secondary.getDB("admin");
- var forceRequiredCode = 10107;
- assert.commandFailedWithCode(admin.runCommand({replSetReconfig: config}), forceRequiredCode);
-
- jsTestLog("Force when appropriate");
- assert.commandWorked(admin.runCommand({replSetReconfig: config, force: true}));
-
- replTest.stopSet();
+"use strict";
+
+// Skip db hash check because secondary is left with a different config.
+TestData.skipCheckDBHashes = true;
+
+var numNodes = 5;
+var replTest = new ReplSetTest({name: 'testSet', nodes: numNodes});
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var primary = replTest.getPrimary();
+
+replTest.awaitSecondaryNodes();
+
+jsTestLog("Valid reconfig");
+var config = primary.getDB("local").system.replset.findOne();
+printjson(config);
+config.version++;
+config.members[nodes.indexOf(primary)].priority = 2;
+assert.commandWorked(primary.getDB("admin").runCommand({replSetReconfig: config}));
+replTest.awaitReplication();
+
+jsTestLog("Invalid reconfig");
+config.version++;
+var badMember = {_id: numNodes, host: "localhost:12345", priority: "High"};
+config.members.push(badMember);
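+// Error code 93 is ErrorCodes.InvalidReplicaSetConfig.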
+var invalidConfigCode = 93;
+assert.commandFailedWithCode(primary.adminCommand({replSetReconfig: config}), invalidConfigCode);
+
+jsTestLog("No force when needed.");
+config.members = config.members.slice(0, numNodes - 1);
+var secondary = replTest.getSecondary();
+config.members[nodes.indexOf(secondary)].priority = 5;
+var admin = secondary.getDB("admin");
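+// Error code 10107 is NotMaster: a reconfig without force must be sent to the primary.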
+var forceRequiredCode = 10107;
+assert.commandFailedWithCode(admin.runCommand({replSetReconfig: config}), forceRequiredCode);
+
+jsTestLog("Force when appropriate");
+assert.commandWorked(admin.runCommand({replSetReconfig: config, force: true}));
+
+replTest.stopSet();
}());
diff --git a/jstests/replsets/reconfig_during_election.js b/jstests/replsets/reconfig_during_election.js
index aaf33c8ac67..20e67a483c8 100644
--- a/jstests/replsets/reconfig_during_election.js
+++ b/jstests/replsets/reconfig_during_election.js
@@ -3,47 +3,50 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/election_handoff.js");
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/replsets/libs/election_handoff.js");
+load("jstests/libs/check_log.js");
- const rst = ReplSetTest({nodes: 2});
- const nodes = rst.startSet();
- const config = rst.getReplSetConfig();
- // Prevent elections and set heartbeat timeout >> electionHangsBeforeUpdateMemberState.
- config.settings = {electionTimeoutMillis: 12 * 60 * 60 * 1000, heartbeatTimeoutSecs: 60 * 1000};
- rst.initiate(config);
+const rst = new ReplSetTest({nodes: 2});
+const nodes = rst.startSet();
+const config = rst.getReplSetConfig();
+// Prevent elections and set heartbeat timeout >> electionHangsBeforeUpdateMemberState.
+config.settings = {
+ electionTimeoutMillis: 12 * 60 * 60 * 1000,
+ heartbeatTimeoutSecs: 60 * 1000
+};
+rst.initiate(config);
- const incumbent = rst.getPrimary();
- const candidate = rst.getSecondary();
+const incumbent = rst.getPrimary();
+const candidate = rst.getSecondary();
- jsTestLog("Step down");
+jsTestLog("Step down");
- assert.commandWorked(candidate.adminCommand({
- configureFailPoint: "electionHangsBeforeUpdateMemberState",
- mode: "alwaysOn",
- data: {waitForMillis: 10 * 1000}
- }));
+assert.commandWorked(candidate.adminCommand({
+ configureFailPoint: "electionHangsBeforeUpdateMemberState",
+ mode: "alwaysOn",
+ data: {waitForMillis: 10 * 1000}
+}));
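+
+// The waitForMillis argument makes the winning node hang for 10 seconds before updating its
+// member state, giving the reconfig below a window in which to race with the election.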
- // The incumbent sends replSetStepUp to the candidate for election handoff.
- assert.commandWorked(incumbent.adminCommand({
- replSetStepDown: ElectionHandoffTest.stepDownPeriodSecs,
- secondaryCatchUpPeriodSecs: ElectionHandoffTest.stepDownPeriodSecs / 2
- }));
+// The incumbent sends replSetStepUp to the candidate for election handoff.
+assert.commandWorked(incumbent.adminCommand({
+ replSetStepDown: ElectionHandoffTest.stepDownPeriodSecs,
+ secondaryCatchUpPeriodSecs: ElectionHandoffTest.stepDownPeriodSecs / 2
+}));
- jsTestLog("Wait for candidate to win the election");
+jsTestLog("Wait for candidate to win the election");
- checkLog.contains(
- candidate, "election succeeded - electionHangsBeforeUpdateMemberState fail point enabled");
+checkLog.contains(candidate,
+ "election succeeded - electionHangsBeforeUpdateMemberState fail point enabled");
- jsTestLog("Try to interrupt it with a reconfig");
+jsTestLog("Try to interrupt it with a reconfig");
- config.members[nodes.indexOf(candidate)].priority = 2;
- config.version++;
- assert.commandWorked(candidate.adminCommand({replSetReconfig: config, force: true}));
+config.members[nodes.indexOf(candidate)].priority = 2;
+config.version++;
+assert.commandWorked(candidate.adminCommand({replSetReconfig: config, force: true}));
- assert.commandWorked(candidate.adminCommand(
- {configureFailPoint: "electionHangsBeforeUpdateMemberState", mode: "off"}));
+assert.commandWorked(candidate.adminCommand(
+ {configureFailPoint: "electionHangsBeforeUpdateMemberState", mode: "off"}));
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js
index d0aefb412a8..2cc177445e1 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js
@@ -12,250 +12,252 @@
*/
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
-
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const dbName = "test";
+const collName = "reconstruct_prepared_transactions_initial_sync";
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
- const dbName = "test";
- const collName = "reconstruct_prepared_transactions_initial_sync";
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
+const session1 = primary.startSession();
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
- const session1 = primary.startSession();
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
+let session2 = primary.startSession();
+let sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- let session2 = primary.startSession();
- let sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+let session3 = primary.startSession();
+let sessionDB3 = session3.getDatabase(dbName);
+const sessionColl3 = sessionDB3.getCollection(collName);
- let session3 = primary.startSession();
- let sessionDB3 = session3.getDatabase(dbName);
- const sessionColl3 = sessionDB3.getCollection(collName);
+assert.commandWorked(sessionColl1.insert({_id: 1}));
+assert.commandWorked(sessionColl2.insert({_id: 2}));
+assert.commandWorked(sessionColl3.insert({_id: 3}));
+assert.commandWorked(sessionColl3.insert({_id: 4}));
- assert.commandWorked(sessionColl1.insert({_id: 1}));
- assert.commandWorked(sessionColl2.insert({_id: 2}));
- assert.commandWorked(sessionColl3.insert({_id: 3}));
- assert.commandWorked(sessionColl3.insert({_id: 4}));
+jsTestLog("Preparing three transactions");
- jsTestLog("Preparing three transactions");
+session1.startTransaction();
+assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
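+
+// PrepareHelpers.prepareTransaction issues the prepareTransaction command on the session and
+// returns the prepareTimestamp needed to commit the transaction later.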
- session1.startTransaction();
- assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
+let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
- let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+session3.startTransaction();
+assert.commandWorked(sessionColl3.update({_id: 3}, {_id: 3, a: 1}));
+const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session3);
- session3.startTransaction();
- assert.commandWorked(sessionColl3.update({_id: 3}, {_id: 3, a: 1}));
- const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session3);
+const lsid2 = session2.getSessionId();
+const txnNumber2 = session2.getTxnNumber_forTesting();
- const lsid2 = session2.getSessionId();
- const txnNumber2 = session2.getTxnNumber_forTesting();
+const lsid3 = session3.getSessionId();
+const txnNumber3 = session3.getTxnNumber_forTesting();
- const lsid3 = session3.getSessionId();
- const txnNumber3 = session3.getTxnNumber_forTesting();
+jsTestLog("Restarting the secondary");
- jsTestLog("Restarting the secondary");
+// Restart the secondary with startClean set to true so that it goes through initial sync. Also
+// restart the node with a failpoint turned on that will pause initial sync after the secondary
+// has copied {_id: 1}, {_id: 2} and {_id: 3}. This way we can do some writes on the sync source
+// while initial sync is paused and know that those writes won't be copied during collection
+// cloning. Instead, they must be applied during the oplog application phase.
+replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangDuringCollectionClone': tojson(
+ {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 3}}),
+ 'numInitialSyncAttempts': 1
+ }
+ },
+ true /* wait */);
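+
+// Fail points can be enabled at startup via a 'failpoint.<name>' setParameter; here the
+// collection cloner pauses once three documents have been copied from the namespace above.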
- // Restart the secondary with startClean set to true so that it goes through initial sync. Also
- // restart the node with a failpoint turned on that will pause initial sync after the secondary
- // has copied {_id: 1}, {_id: 2} and {_id: 3}. This way we can do some writes on the sync source
- // while initial sync is paused and know that its operations won't be copied during collection
- // cloning. Instead, the writes must be applied during oplog application.
- replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangDuringCollectionClone': tojson(
- {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 3}}),
- 'numInitialSyncAttempts': 1
- }
- },
- true /* wait */);
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
+jsTestLog("Running operations while collection cloning is paused");
- jsTestLog("Running operations while collection cloning is paused");
+// Perform writes while collection cloning is paused so that we know they must be applied during
+// the oplog application stage of initial sync.
+assert.commandWorked(testColl.insert({_id: 5}));
+
+let session4 = primary.startSession();
+let sessionDB4 = session4.getDatabase(dbName);
+const sessionColl4 = sessionDB4.getCollection(collName);
- // Perform writes while collection cloning is paused so that we know they must be applied during
- // the oplog application stage of initial sync.
- assert.commandWorked(testColl.insert({_id: 5}));
-
- let session4 = primary.startSession();
- let sessionDB4 = session4.getDatabase(dbName);
- const sessionColl4 = sessionDB4.getCollection(collName);
-
- jsTestLog("Preparing the fourth transaction");
-
- // Prepare a transaction while collection cloning is paused so that its oplog entry must be
- // applied during the oplog application phase of initial sync.
- session4.startTransaction();
- assert.commandWorked(sessionColl4.update({_id: 4}, {_id: 4, a: 1}));
- const prepareTimestamp4 = PrepareHelpers.prepareTransaction(session4, {w: 1});
-
- jsTestLog("Resuming initial sync");
-
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
-
- // Wait for the secondary to complete initial sync.
- replTest.awaitSecondaryNodes();
-
- jsTestLog("Initial sync completed");
-
- secondary.setSlaveOk();
- const secondaryColl = secondary.getDB(dbName).getCollection(collName);
-
- // Make sure that while reading from the node that went through initial sync, we can't read
- // changes to the documents from any of the prepared transactions after initial sync. Also, make
- // sure that the writes that happened when collection cloning was paused happened.
- const res = secondaryColl.find().sort({_id: 1}).toArray();
- assert.eq(res, [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}], res);
-
- jsTestLog("Checking that the first transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the first prepared transaction
- // after initial sync.
- assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1});
-
- jsTestLog("Committing the first transaction");
-
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
- replTest.awaitReplication();
-
- // Make sure that we can see the data from a committed transaction on the secondary if it was
- // applied during secondary oplog application.
- assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
-
- jsTestLog("Checking that the fourth transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the first prepared transaction
- // after initial sync.
- assert.docEq(secondaryColl.findOne({_id: 4}), {_id: 4});
-
- jsTestLog("Committing the fourth transaction");
-
- assert.commandWorked(PrepareHelpers.commitTransaction(session4, prepareTimestamp4));
- replTest.awaitReplication();
-
- // Make sure that we can see the data from a committed transaction on the secondary if it was
- // applied during secondary oplog application.
- assert.docEq(secondaryColl.findOne({_id: 4}), {_id: 4, a: 1});
-
- jsTestLog("Stepping up the secondary");
-
- // Step up the secondary after initial sync is done and make sure the other two transactions are
- // properly prepared.
- replTest.stepUp(secondary);
- replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
- const newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB(dbName);
- testColl = testDB.getCollection(collName);
-
- // Force the second session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
- session2.setTxnNumber_forTesting(txnNumber2);
- sessionDB2 = session2.getDatabase(dbName);
-
- jsTestLog("Checking that the second transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the second prepared transaction
- // after initial sync.
- assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2}]);
-
- // Make sure that another write on the same document from the second transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the second transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB2.runCommand({
- insert: collName,
- documents: [{_id: 6}],
- txnNumber: NumberLong(txnNumber2),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Committing the second transaction");
-
- // Make sure we can successfully commit the second transaction after recovery.
- assert.commandWorked(sessionDB2.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp2,
- txnNumber: NumberLong(txnNumber2),
- autocommit: false
- }));
- assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2, a: 1}]);
-
- jsTestLog("Attempting to run another transaction");
-
- // Make sure that we can run another conflicting transaction without any problems
- session2.startTransaction();
- assert.commandWorked(sessionDB2[collName].update({_id: 2}, {_id: 2, a: 3}));
- prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
- assert.docEq(testColl.findOne({_id: 2}), {_id: 2, a: 3});
-
- // Force the third session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session3 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid3);
- session3.setTxnNumber_forTesting(txnNumber3);
- sessionDB3 = session3.getDatabase(dbName);
-
- jsTestLog("Checking that the third transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the third prepared transaction
- // after initial sync.
- assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
-
- // Make sure that another write on the same document from the third transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 3}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the third transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB3.runCommand({
- insert: collName,
- documents: [{_id: 6}],
- txnNumber: NumberLong(txnNumber3),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Aborting the third transaction");
-
- // Make sure we can successfully abort the third transaction after recovery.
- assert.commandWorked(sessionDB3.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber3), autocommit: false}));
- assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
-
- replTest.stopSet();
+jsTestLog("Preparing the fourth transaction");
+
+// Prepare a transaction while collection cloning is paused so that its oplog entry must be
+// applied during the oplog application phase of initial sync.
+session4.startTransaction();
+assert.commandWorked(sessionColl4.update({_id: 4}, {_id: 4, a: 1}));
+const prepareTimestamp4 = PrepareHelpers.prepareTransaction(session4, {w: 1});
+
+jsTestLog("Resuming initial sync");
+
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+
+// Wait for the secondary to complete initial sync.
+replTest.awaitSecondaryNodes();
+
+jsTestLog("Initial sync completed");
+
+secondary.setSlaveOk();
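+// setSlaveOk allows this connection to run reads against a node in SECONDARY state.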
+const secondaryColl = secondary.getDB(dbName).getCollection(collName);
+
+// Make sure that while reading from the node that went through initial sync, we can't read
+// changes to the documents from any of the prepared transactions after initial sync. Also, make
+// sure that the writes performed while collection cloning was paused are present.
+const res = secondaryColl.find().sort({_id: 1}).toArray();
+assert.eq(res, [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}], res);
+
+jsTestLog("Checking that the first transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the first prepared transaction
+// after initial sync.
+assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1});
+
+jsTestLog("Committing the first transaction");
+
+assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
+replTest.awaitReplication();
+
+// Make sure that we can see the data from a committed transaction on the secondary if it was
+// applied during secondary oplog application.
+assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
+
+jsTestLog("Checking that the fourth transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the fourth prepared transaction
+// after initial sync.
+assert.docEq(secondaryColl.findOne({_id: 4}), {_id: 4});
+
+jsTestLog("Committing the fourth transaction");
+
+assert.commandWorked(PrepareHelpers.commitTransaction(session4, prepareTimestamp4));
+replTest.awaitReplication();
+
+// Make sure that we can see the data from a committed transaction on the secondary if it was
+// applied during secondary oplog application.
+assert.docEq(secondaryColl.findOne({_id: 4}), {_id: 4, a: 1});
+
+jsTestLog("Stepping up the secondary");
+
+// Step up the secondary after initial sync is done and make sure the other two transactions are
+// properly prepared.
+replTest.stepUp(secondary);
+replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
+const newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+
+// Force the second session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
+session2.setTxnNumber_forTesting(txnNumber2);
+sessionDB2 = session2.getDatabase(dbName);
+
+jsTestLog("Checking that the second transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the second prepared transaction
+// after initial sync.
+assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2}]);
+
+// Make sure that another write on the same document from the second transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
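+
+// A write to a document touched by a prepared transaction raises a prepare conflict and blocks
+// until the transaction commits or aborts; maxTimeMS turns that wait into a deterministic
+// MaxTimeMSExpired error.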
+
+// Make sure that we cannot add other operations to the second transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB2.runCommand({
+ insert: collName,
+ documents: [{_id: 6}],
+ txnNumber: NumberLong(txnNumber2),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Committing the second transaction");
+
+// Make sure we can successfully commit the second transaction after recovery.
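+// Committing requires the original lsid and txnNumber plus an explicit commitTimestamp at or
+// later than the prepareTimestamp.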
+assert.commandWorked(sessionDB2.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp2,
+ txnNumber: NumberLong(txnNumber2),
+ autocommit: false
+}));
+assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2, a: 1}]);
+
+jsTestLog("Attempting to run another transaction");
+
+// Make sure that we can run another conflicting transaction without any problems.
+session2.startTransaction();
+assert.commandWorked(sessionDB2[collName].update({_id: 2}, {_id: 2, a: 3}));
+prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
+assert.docEq(testColl.findOne({_id: 2}), {_id: 2, a: 3});
+
+// Force the third session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session3 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid3);
+session3.setTxnNumber_forTesting(txnNumber3);
+sessionDB3 = session3.getDatabase(dbName);
+
+jsTestLog("Checking that the third transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the third prepared transaction
+// after initial sync.
+assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
+
+// Make sure that another write on the same document from the third transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 3}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the third transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB3.runCommand({
+ insert: collName,
+ documents: [{_id: 6}],
+ txnNumber: NumberLong(txnNumber3),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Aborting the third transaction");
+
+// Make sure we can successfully abort the third transaction after recovery.
+assert.commandWorked(sessionDB3.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber3), autocommit: false}));
+assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
+
+replTest.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js
index 274c2eb4d94..a9f35921e8e 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_index_build.js
@@ -7,120 +7,122 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
-
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
-
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- const dbName = "test";
- const collName = "reconstruct_prepared_transactions_initial_sync_index_build";
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testColl.insert({_id: 0}));
-
- jsTestLog("Restarting the secondary");
-
- // Restart the secondary with startClean set to true so that it goes through initial sync. Also
- // restart the node with a failpoint turned on that will pause initial sync. This way we can do
- // some writes on the sync source while initial sync is paused and know that its operations
- // won't be copied during collection cloning. Instead, the writes must be applied during oplog
- // application.
- replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- 'failpoint.initialSyncHangDuringCollectionClone': tojson(
- {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 1}}),
- 'numInitialSyncAttempts': 1
- }
- },
- true /* wait */);
-
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
-
- jsTestLog("Running operations while collection cloning is paused");
-
- // Perform writes while collection cloning is paused so that we know they must be applied during
- // the oplog application stage of initial sync.
- assert.commandWorked(testColl.insert({_id: 1, a: 1}));
- assert.commandWorked(testColl.createIndex({a: 1}));
- // Make the index build hang on the secondary so that initial sync gets to the prepared-txn
- // reconstruct stage with the index build still running.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuild', mode: "alwaysOn"}));
-
- let session = primary.startSession();
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
-
- jsTestLog("Preparing the transaction");
-
- // Prepare a transaction while collection cloning is paused so that its oplog entry must be
- // applied during the oplog application phase of initial sync.
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1, a: 1}, {_id: 1, a: 2}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
-
- clearRawMongoProgramOutput();
- jsTestLog("Resuming initial sync");
-
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
-
- // Wait for log message.
- assert.soon(
- () =>
- rawMongoProgramOutput().indexOf(
- "blocking replication until index builds are finished on test.reconstruct_prepared_transactions_initial_sync_index_build, due to prepared transaction") >=
- 0,
- "replication not hanging");
-
- // Unblock index build.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: "off"}));
-
- // Wait for the secondary to complete initial sync.
- replTest.awaitSecondaryNodes();
-
- jsTestLog("Initial sync completed");
-
- secondary.setSlaveOk();
- const secondaryColl = secondary.getDB(dbName).getCollection(collName);
-
- // Make sure that while reading from the node that went through initial sync, we can't read
- // changes to the documents from the prepared transaction after initial sync. Also, make
- // sure that the writes that happened when collection cloning was paused happened.
- const res = secondaryColl.find().sort({_id: 1}).toArray();
- assert.eq(res, [{_id: 0}, {_id: 1, a: 1}], res);
-
- // Wait for the prepared transaction oplog entry to be majority committed before committing the
- // transaction.
- PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
-
- jsTestLog("Committing the transaction");
-
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- replTest.awaitReplication();
-
- // Make sure that we can see the data from the committed transaction on the secondary.
- assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 2});
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
+
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+const dbName = "test";
+const collName = "reconstruct_prepared_transactions_initial_sync_index_build";
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert({_id: 0}));
+
+jsTestLog("Restarting the secondary");
+
+// Restart the secondary with startClean set to true so that it goes through initial sync. Also
+// restart the node with a failpoint turned on that will pause initial sync. This way we can do
+// some writes on the sync source while initial sync is paused and know that those writes won't
+// be copied during collection cloning. Instead, they must be applied during the oplog
+// application phase.
+replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ 'failpoint.initialSyncHangDuringCollectionClone': tojson(
+ {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 1}}),
+ 'numInitialSyncAttempts': 1
+ }
+ },
+ true /* wait */);
+
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
+
+jsTestLog("Running operations while collection cloning is paused");
+
+// Perform writes while collection cloning is paused so that we know they must be applied during
+// the oplog application stage of initial sync.
+assert.commandWorked(testColl.insert({_id: 1, a: 1}));
+assert.commandWorked(testColl.createIndex({a: 1}));
+// Make the index build hang on the secondary so that initial sync gets to the prepared-txn
+// reconstruct stage with the index build still running.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: "alwaysOn"}));
+
+let session = primary.startSession();
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+
+jsTestLog("Preparing the transaction");
+
+// Prepare a transaction while collection cloning is paused so that its oplog entry must be
+// applied during the oplog application phase of initial sync.
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1, a: 1}, {_id: 1, a: 2}));
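+// The {w: 1} write concern on the prepare matters here: the only secondary is paused in
+// initial sync, so waiting for majority acknowledgement would hang.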
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+
+clearRawMongoProgramOutput();
+jsTestLog("Resuming initial sync");
+
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+
+// Wait for log message.
+assert.soon(
+ () =>
+ rawMongoProgramOutput().indexOf(
+ "blocking replication until index builds are finished on test.reconstruct_prepared_transactions_initial_sync_index_build, due to prepared transaction") >=
+ 0,
+ "replication not hanging");
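+
+// rawMongoProgramOutput returns the accumulated output of shell-spawned processes; it was
+// cleared above so this search only matches messages logged after initial sync resumed.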
+
+// Unblock index build.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'hangAfterStartingIndexBuild', mode: "off"}));
+
+// Wait for the secondary to complete initial sync.
+replTest.awaitSecondaryNodes();
+
+jsTestLog("Initial sync completed");
+
+secondary.setSlaveOk();
+const secondaryColl = secondary.getDB(dbName).getCollection(collName);
+
+// Make sure that while reading from the node that went through initial sync, we can't read
+// changes to the documents from the prepared transaction after initial sync. Also, make
+// sure that the writes performed while collection cloning was paused are present.
+const res = secondaryColl.find().sort({_id: 1}).toArray();
+assert.eq(res, [{_id: 0}, {_id: 1, a: 1}], res);
+
+// Wait for the prepared transaction oplog entry to be majority committed before committing the
+// transaction.
+PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
+
+jsTestLog("Committing the transaction");
+
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+replTest.awaitReplication();
+
+// Make sure that we can see the data from the committed transaction on the secondary.
+assert.docEq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 2});
+
+replTest.stopSet();
})();
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js
index 340599aed54..cf388620e20 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js
@@ -9,194 +9,195 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const dbName = "test";
- const collName = "reconstruct_prepared_transactions_initial_sync_no_oplog_application";
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "reconstruct_prepared_transactions_initial_sync_no_oplog_application";
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
- const session1 = primary.startSession();
- const sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
+const session1 = primary.startSession();
+const sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
- let session2 = primary.startSession();
- let sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+let session2 = primary.startSession();
+let sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- let session3 = primary.startSession();
- let sessionDB3 = session3.getDatabase(dbName);
- const sessionColl3 = sessionDB3.getCollection(collName);
+let session3 = primary.startSession();
+let sessionDB3 = session3.getDatabase(dbName);
+const sessionColl3 = sessionDB3.getCollection(collName);
- assert.commandWorked(sessionColl1.insert({_id: 1}));
- assert.commandWorked(sessionColl2.insert({_id: 2}));
- assert.commandWorked(sessionColl3.insert({_id: 3}));
+assert.commandWorked(sessionColl1.insert({_id: 1}));
+assert.commandWorked(sessionColl2.insert({_id: 2}));
+assert.commandWorked(sessionColl3.insert({_id: 3}));
- jsTestLog("Preparing three transactions");
+jsTestLog("Preparing three transactions");
- session1.startTransaction();
- assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
+session1.startTransaction();
+assert.commandWorked(sessionColl1.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(session1);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
- let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
+let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- session3.startTransaction();
- assert.commandWorked(sessionColl3.update({_id: 3}, {_id: 3, a: 1}));
- const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session3);
-
- const lsid2 = session2.getSessionId();
- const txnNumber2 = session2.getTxnNumber_forTesting();
+session3.startTransaction();
+assert.commandWorked(sessionColl3.update({_id: 3}, {_id: 3, a: 1}));
+const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session3);
- const lsid3 = session3.getSessionId();
- const txnNumber3 = session3.getTxnNumber_forTesting();
-
- jsTestLog("Restarting the secondary");
-
- // Restart the secondary with startClean set to true so that it goes through initial sync.
- replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
- secondary = replTest.start(secondary,
- {startClean: true, setParameter: {'numInitialSyncAttempts': 1}},
- true /* wait */);
+const lsid2 = session2.getSessionId();
+const txnNumber2 = session2.getTxnNumber_forTesting();
+
+const lsid3 = session3.getSessionId();
+const txnNumber3 = session3.getTxnNumber_forTesting();
+
+jsTestLog("Restarting the secondary");
- // Wait for the secondary to complete initial sync.
- replTest.awaitSecondaryNodes();
-
- jsTestLog("Initial sync completed");
-
- secondary.setSlaveOk();
- const secondaryColl = secondary.getDB(dbName).getCollection(collName);
-
- // Make sure that while reading from the node that went through initial sync, we can't read
- // changes to the documents from any of the prepared transactions after initial sync.
- const res = secondaryColl.find().sort({_id: 1}).toArray();
- assert.eq(res, [{_id: 1}, {_id: 2}, {_id: 3}], res);
-
- jsTestLog("Checking that the first transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the first prepared transaction
- // after initial sync.
- assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1});
-
- jsTestLog("Committing the first transaction");
-
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
- replTest.awaitReplication();
-
- // Make sure that we can see the data from a committed transaction on the secondary if it was
- // applied during secondary oplog application.
- assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
-
- jsTestLog("Stepping up the secondary");
-
- // Step up the secondary after initial sync is done and make sure the other two transactions are
- // properly prepared.
- replTest.stepUp(secondary);
- replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
- const newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB(dbName);
- testColl = testDB.getCollection(collName);
-
- // Force the second session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
- session2.setTxnNumber_forTesting(txnNumber2);
- sessionDB2 = session2.getDatabase(dbName);
-
- jsTestLog("Checking that the second transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the second prepared transaction
- // after initial sync.
- assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2}]);
-
- // Make sure that another write on the same document from the second transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the second transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB2.runCommand({
- insert: collName,
- documents: [{_id: 4}],
- txnNumber: NumberLong(txnNumber2),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Committing the second transaction");
-
- // Make sure we can successfully commit the second transaction after recovery.
- assert.commandWorked(sessionDB2.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp2,
- txnNumber: NumberLong(txnNumber2),
- autocommit: false
- }));
- assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2, a: 1}]);
-
- jsTestLog("Attempting to run another transaction on the second session");
-
- // Make sure that we can run another conflicting transaction without any problems.
- session2.startTransaction();
- assert.commandWorked(sessionDB2[collName].update({_id: 2}, {_id: 2, a: 3}));
- prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
- assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
- assert.eq(testColl.findOne({_id: 2}), {_id: 2, a: 3});
-
- // Force the third session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session3 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid3);
- session3.setTxnNumber_forTesting(txnNumber3);
- sessionDB3 = session3.getDatabase(dbName);
-
- jsTestLog("Checking that the third transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the third prepared transaction
- // after initial sync.
- assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
-
- // Make sure that another write on the same document from the third transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 3}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the third transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB3.runCommand({
- insert: collName,
- documents: [{_id: 4}],
- txnNumber: NumberLong(txnNumber3),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Aborting the third transaction");
-
- // Make sure we can successfully abort the third transaction after recovery.
- assert.commandWorked(sessionDB3.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber3), autocommit: false}));
- assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
-
- replTest.stopSet();
+// Restart the secondary with startClean set to true so that it goes through initial sync.
+replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
+secondary = replTest.start(
+ secondary, {startClean: true, setParameter: {'numInitialSyncAttempts': 1}}, true /* wait */);
+
+// Wait for the secondary to complete initial sync.
+replTest.awaitSecondaryNodes();
+
+jsTestLog("Initial sync completed");
+
+secondary.setSlaveOk();
+const secondaryColl = secondary.getDB(dbName).getCollection(collName);
+
+// Make sure that while reading from the node that went through initial sync, we can't read
+// changes to the documents from any of the prepared transactions after initial sync.
+const res = secondaryColl.find().sort({_id: 1}).toArray();
+assert.eq(res, [{_id: 1}, {_id: 2}, {_id: 3}], res);
+
+jsTestLog("Checking that the first transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the first prepared transaction
+// after initial sync.
+assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1});
+
+jsTestLog("Committing the first transaction");
+
+assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp1));
+replTest.awaitReplication();
+
+// Make sure that we can see the data from a committed transaction on the secondary if it was
+// applied during secondary oplog application.
+assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
+
+jsTestLog("Stepping up the secondary");
+
+// Step up the secondary after initial sync is done and make sure the other two transactions are
+// properly prepared.
+replTest.stepUp(secondary);
+replTest.waitForState(secondary, ReplSetTest.State.PRIMARY);
+const newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+
+// Force the second session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session2 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid2);
+session2.setTxnNumber_forTesting(txnNumber2);
+sessionDB2 = session2.getDatabase(dbName);
+
+jsTestLog("Checking that the second transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the second prepared transaction
+// after initial sync.
+assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2}]);
+
+// Make sure that another write on the same document from the second transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the second transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB2.runCommand({
+ insert: collName,
+ documents: [{_id: 4}],
+ txnNumber: NumberLong(txnNumber2),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Committing the second transaction");
+
+// Make sure we can successfully commit the second transaction after recovery.
+assert.commandWorked(sessionDB2.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp2,
+ txnNumber: NumberLong(txnNumber2),
+ autocommit: false
+}));
+assert.eq(testColl.find({_id: 2}).toArray(), [{_id: 2, a: 1}]);
+
+jsTestLog("Attempting to run another transaction on the second session");
+
+// Make sure that we can run another conflicting transaction without any problems.
+session2.startTransaction();
+assert.commandWorked(sessionDB2[collName].update({_id: 2}, {_id: 2, a: 3}));
+prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2);
+assert.commandWorked(PrepareHelpers.commitTransaction(session2, prepareTimestamp2));
+assert.eq(testColl.findOne({_id: 2}), {_id: 2, a: 3});
+
+// Force the third session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session3 = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid3);
+session3.setTxnNumber_forTesting(txnNumber3);
+sessionDB3 = session3.getDatabase(dbName);
+
+jsTestLog("Checking that the third transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the third prepared transaction
+// after initial sync.
+assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
+
+// Make sure that another write on the same document from the third transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 3}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the third transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB3.runCommand({
+ insert: collName,
+ documents: [{_id: 4}],
+ txnNumber: NumberLong(txnNumber3),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Aborting the third transaction");
+
+// Make sure we can successfully abort the third transaction after recovery.
+assert.commandWorked(sessionDB3.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber3), autocommit: false}));
+assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]);
+
+replTest.stopSet();
})();
\ No newline at end of file
diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_on_oplog_seed.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_on_oplog_seed.js
index ed8547453fa..30f9c497e48 100644
--- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_on_oplog_seed.js
+++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_on_oplog_seed.js
@@ -12,108 +12,110 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/check_log.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
- const config = replTest.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while the
- // secondary is restarting.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- replTest.initiate(config);
+const config = replTest.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while the
+// secondary is restarting.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+replTest.initiate(config);
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const dbName = "test";
- const collName = "reconstruct_prepared_transactions_initial_sync_on_oplog_seed";
+const dbName = "test";
+const collName = "reconstruct_prepared_transactions_initial_sync_on_oplog_seed";
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- assert.commandWorked(testColl.insert({_id: 1}));
+assert.commandWorked(testColl.insert({_id: 1}));
- jsTestLog("Restarting the secondary");
+jsTestLog("Restarting the secondary");
- // Restart the secondary with startClean set to true so that it goes through initial sync.
- replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
- secondary = replTest.start(
- secondary,
- {
- startClean: true,
- setParameter: {
- 'numInitialSyncAttempts': 2,
- // Fail point to force the first attempt to fail and hang before starting the second
- // attempt.
- 'failpoint.failAndHangInitialSync': tojson({mode: 'alwaysOn'}),
- 'failpoint.initialSyncHangDuringCollectionClone': tojson(
- {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 0}}),
- 'logComponentVerbosity': tojson({'replication': {'initialSync': 2}})
- }
- },
- true /* wait */);
+// Restart the secondary with startClean set to true so that it goes through initial sync.
+replTest.stop(secondary, undefined /* signal */, {skipValidation: true});
+secondary = replTest.start(
+ secondary,
+ {
+ startClean: true,
+ setParameter: {
+ 'numInitialSyncAttempts': 2,
+ // Fail point to force the first attempt to fail and hang before starting the second
+ // attempt.
+ 'failpoint.failAndHangInitialSync': tojson({mode: 'alwaysOn'}),
+ 'failpoint.initialSyncHangDuringCollectionClone': tojson(
+ {mode: 'alwaysOn', data: {namespace: testColl.getFullName(), numDocsToClone: 0}}),
+ 'logComponentVerbosity': tojson({'replication': {'initialSync': 2}})
+ }
+ },
+ true /* wait */);
- // Wait for failpoint to be reached so we know that collection cloning is paused.
- checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
+// Wait for failpoint to be reached so we know that collection cloning is paused.
+checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");
- jsTestLog("Running operations while collection cloning is paused");
+jsTestLog("Running operations while collection cloning is paused");
- // Perform writes while collection cloning is paused so that we know they must be applied during
- // the first attempt of initial sync.
- assert.commandWorked(testColl.insert({_id: 2}));
+// Perform writes while collection cloning is paused so that we know they must be applied during
+// the first attempt of initial sync.
+assert.commandWorked(testColl.insert({_id: 2}));
- jsTestLog("Resuming initial sync");
+jsTestLog("Resuming initial sync");
- // Resume initial sync.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
+// Resume initial sync.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
- // Wait for failpoint to be reached so we know the first attempt is finishing and is about to
- // fail.
- checkLog.contains(secondary, "failAndHangInitialSync fail point enabled");
+// Wait for failpoint to be reached so we know the first attempt is finishing and is about to
+// fail.
+checkLog.contains(secondary, "failAndHangInitialSync fail point enabled");
- jsTestLog("Preparing the transaction before the second attempt of initial sync");
+jsTestLog("Preparing the transaction before the second attempt of initial sync");
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
- jsTestLog("Resuming initial sync for the second attempt");
- // Resume initial sync.
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "failAndHangInitialSync", mode: "off"}));
+jsTestLog("Resuming initial sync for the second attempt");
+// Resume initial sync.
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "failAndHangInitialSync", mode: "off"}));
- // Wait for the secondary to complete initial sync.
- replTest.awaitSecondaryNodes();
- PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
+// Wait for the secondary to complete initial sync.
+replTest.awaitSecondaryNodes();
+PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);
- jsTestLog("Initial sync completed");
+jsTestLog("Initial sync completed");
- secondary.setSlaveOk();
- const secondaryColl = secondary.getDB(dbName).getCollection(collName);
+secondary.setSlaveOk();
+const secondaryColl = secondary.getDB(dbName).getCollection(collName);
- jsTestLog("Checking that the transaction is properly prepared");
+jsTestLog("Checking that the transaction is properly prepared");
- // Make sure that we can't read changes to the document from the prepared transaction after
- // initial sync.
- assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1});
+// Make sure that we can't read changes to the document from the prepared transaction after
+// initial sync.
+assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1});
- jsTestLog("Committing the transaction");
+jsTestLog("Committing the transaction");
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- replTest.awaitReplication();
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+replTest.awaitReplication();
- // Make sure that we can see the data from the committed transaction on the secondary if it was
- // applied during secondary oplog application.
- assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
+// Make sure that we can see the data from the committed transaction on the secondary if it was
+// applied during secondary oplog application.
+assert.eq(secondaryColl.findOne({_id: 1}), {_id: 1, a: 1});
- replTest.stopSet();
+replTest.stopSet();
})();
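
The test above leans on a pattern that recurs across this directory: pause a server code path with a failpoint, wait for the log line that proves the pause was reached, run the racing operations, then release the failpoint. A minimal sketch of that pattern, assuming a connection named `secondary` and the check_log.js helper the test already loads:

    // Enable the failpoint at runtime (at startup it can instead be passed as
    // setParameter: {'failpoint.<name>': tojson({mode: 'alwaysOn'})}).
    assert.commandWorked(secondary.adminCommand(
        {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "alwaysOn"}));

    // Block until the server logs that it hit the failpoint, so the test knows
    // the node is genuinely paused before it issues concurrent writes.
    checkLog.contains(secondary, "initialSyncHangDuringCollectionClone fail point enabled");

    // ... run the operations that must race with the paused code path ...

    // Release the failpoint so the paused code path resumes.
    assert.commandWorked(secondary.adminCommand(
        {configureFailPoint: "initialSyncHangDuringCollectionClone", mode: "off"}));
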
diff --git a/jstests/replsets/recover_committed_aborted_prepared_transactions.js b/jstests/replsets/recover_committed_aborted_prepared_transactions.js
index 031682b2064..b5b88d6c549 100644
--- a/jstests/replsets/recover_committed_aborted_prepared_transactions.js
+++ b/jstests/replsets/recover_committed_aborted_prepared_transactions.js
@@ -7,130 +7,129 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/aggregation/extras/utils.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
- const dbName = "test";
- const collName = "recover_committed_aborted_prepared_transactions";
+const dbName = "test";
+const collName = "recover_committed_aborted_prepared_transactions";
- const rollbackTest = new RollbackTest(dbName);
- let primary = rollbackTest.getPrimary();
+const rollbackTest = new RollbackTest(dbName);
+let primary = rollbackTest.getPrimary();
- // Create collection we're using beforehand.
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
+// Create collection we're using beforehand.
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testDB.runCommand({create: collName}));
- // Start two different sessions on the primary.
- let session1 = primary.startSession({causalConsistency: false});
- const sessionID1 = session1.getSessionId();
- const session2 = primary.startSession({causalConsistency: false});
+// Start two different sessions on the primary.
+let session1 = primary.startSession({causalConsistency: false});
+const sessionID1 = session1.getSessionId();
+const session2 = primary.startSession({causalConsistency: false});
- let sessionDB1 = session1.getDatabase(dbName);
- let sessionColl1 = sessionDB1.getCollection(collName);
+let sessionDB1 = session1.getDatabase(dbName);
+let sessionColl1 = sessionDB1.getCollection(collName);
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- assert.commandWorked(sessionColl1.insert({id: 1}));
+assert.commandWorked(sessionColl1.insert({id: 1}));
- rollbackTest.awaitLastOpCommitted();
+rollbackTest.awaitLastOpCommitted();
- // Prepare a transaction on the first session which will be committed eventually.
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({id: 2}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session1);
+// Prepare a transaction on the first session which will be committed eventually.
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({id: 2}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session1);
- // Prevent the stable timestamp from moving beyond the following prepared transactions so
- // that when we replay the oplog from the stable timestamp, we correctly recover them.
+// Prevent the stable timestamp from moving beyond the following prepared transactions so
+// that when we replay the oplog from the stable timestamp, we correctly recover them.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+
+// The following transactions will be prepared before the common point, so they must be in
+// prepare after rollback recovery.
+
+// Prepare another transaction on the second session which will be aborted.
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({id: 3}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
+
+// Commit the first transaction.
+assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp));
+
+// Abort the second transaction.
+assert.commandWorked(session2.abortTransaction_forTesting());
+
+// Check that we have two transactions in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+
+// The following write will be rolled back.
+rollbackTest.transitionToRollbackOperations();
+assert.commandWorked(testColl.insert({id: 4}));
+
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+
+try {
+ rollbackTest.transitionToSteadyStateOperations();
+} finally {
assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
-
- // The following transactions will be prepared before the common point, so they must be in
- // prepare after rollback recovery.
-
- // Prepare another transaction on the second session which will be aborted.
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({id: 3}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
-
- // Commit the first transaction.
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp));
-
- // Abort the second transaction.
- assert.commandWorked(session2.abortTransaction_forTesting());
-
- // Check that we have two transactions in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
-
- // The following write will be rolled back.
- rollbackTest.transitionToRollbackOperations();
- assert.commandWorked(testColl.insert({id: 4}));
-
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-
- try {
- rollbackTest.transitionToSteadyStateOperations();
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
- }
-
- // Make sure there are two transactions in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
-
- // Make sure we can see the first two writes and the insert from the first prepared transaction.
- // Make sure we cannot see the insert from the second prepared transaction or the writes after
- // transitionToRollbackOperations.
- arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}]);
- arrayEq(sessionColl1.find().toArray(), [{_id: 1}, {_id: 2}]);
-
- assert.eq(testColl.count(), 2);
- assert.eq(sessionColl1.count(), 2);
-
- // Get the correct members after the topology changes.
- primary = rollbackTest.getPrimary();
- testDB = primary.getDB(dbName);
- testColl = testDB.getCollection(collName);
- const rst = rollbackTest.getTestFixture();
- const secondaries = rst.getSecondaries();
-
- // Make sure we can successfully run a prepared transaction on the same first session after
- // going through rollback. This ensures that the session state has properly been restored.
- session1 =
- PrepareHelpers.createSessionWithGivenId(primary, sessionID1, {causalConsistency: false});
- sessionDB1 = session1.getDatabase(dbName);
- sessionColl1 = sessionDB1.getCollection(collName);
- // The next transaction on this session should have a txnNumber of 1. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session1.setTxnNumber_forTesting(1);
-
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 5}));
- const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session1);
- // Make sure we can successfully retry the commitTransaction command after rollback.
- assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp3));
-
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 6}));
- PrepareHelpers.prepareTransaction(session1);
- assert.commandWorked(session1.abortTransaction_forTesting());
- // Retrying the abortTransaction command should fail with a NoSuchTransaction error.
- assert.commandFailedWithCode(sessionDB1.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(session1.getTxnNumber_forTesting()),
- autocommit: false,
- }),
- ErrorCodes.NoSuchTransaction);
-
- // Make sure we can see the insert after committing the prepared transaction.
- arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}, {_id: 5}]);
- assert.eq(testColl.count(), 3);
-
- rollbackTest.stop();
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
+}
+
+// Make sure there are two transactions in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+
+// Make sure we can see the first two writes and the insert from the first prepared transaction.
+// Make sure we cannot see the insert from the second prepared transaction or the writes after
+// transitionToRollbackOperations.
+arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}]);
+arrayEq(sessionColl1.find().toArray(), [{_id: 1}, {_id: 2}]);
+
+assert.eq(testColl.count(), 2);
+assert.eq(sessionColl1.count(), 2);
+
+// Get the correct members after the topology changes.
+primary = rollbackTest.getPrimary();
+testDB = primary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+const rst = rollbackTest.getTestFixture();
+const secondaries = rst.getSecondaries();
+
+// Make sure we can successfully run a prepared transaction on the same first session after
+// going through rollback. This ensures that the session state has properly been restored.
+session1 = PrepareHelpers.createSessionWithGivenId(primary, sessionID1, {causalConsistency: false});
+sessionDB1 = session1.getDatabase(dbName);
+sessionColl1 = sessionDB1.getCollection(collName);
+// The next transaction on this session should have a txnNumber of 1. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session1.setTxnNumber_forTesting(1);
+
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 5}));
+const prepareTimestamp3 = PrepareHelpers.prepareTransaction(session1);
+// Make sure we can successfully retry the commitTransaction command after rollback.
+assert.commandWorked(PrepareHelpers.commitTransaction(session1, prepareTimestamp3));
+
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 6}));
+PrepareHelpers.prepareTransaction(session1);
+assert.commandWorked(session1.abortTransaction_forTesting());
+// Retrying the abortTransaction command should fail with a NoSuchTransaction error.
+assert.commandFailedWithCode(sessionDB1.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(session1.getTxnNumber_forTesting()),
+ autocommit: false,
+}),
+ ErrorCodes.NoSuchTransaction);
+
+// Make sure we can see the insert after committing the prepared transaction.
+arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}, {_id: 5}]);
+assert.eq(testColl.count(), 3);
+
+rollbackTest.stop();
}());
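
The tail of this test encodes a retryability contrast: commitTransaction can be retried with the same session, txnNumber, and commitTimestamp and reports success again, while a retried abortTransaction fails with NoSuchTransaction. A condensed sketch, assuming `sessionDB`, `prepareTimestamp`, a committed transaction numbered `txnNumber`, and an already-aborted transaction numbered `txnNumber2`, all captured as in the tests in this directory:

    // Commit timestamps for prepared transactions must not precede the prepare
    // timestamp; these tests use the next increment.
    const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
    const commitCmd = {
        commitTransaction: 1,
        commitTimestamp: commitTimestamp,
        txnNumber: NumberLong(txnNumber),
        autocommit: false,
    };
    assert.commandWorked(sessionDB.adminCommand(commitCmd));
    assert.commandWorked(sessionDB.adminCommand(commitCmd));  // the retry also succeeds

    // For the transaction that was already aborted, retrying the abort is an
    // error rather than a no-op.
    assert.commandFailedWithCode(
        sessionDB.adminCommand(
            {abortTransaction: 1, txnNumber: NumberLong(txnNumber2), autocommit: false}),
        ErrorCodes.NoSuchTransaction);
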
diff --git a/jstests/replsets/recover_multiple_prepared_transactions_startup.js b/jstests/replsets/recover_multiple_prepared_transactions_startup.js
index 10d82bd5536..3711fbfa276 100644
--- a/jstests/replsets/recover_multiple_prepared_transactions_startup.js
+++ b/jstests/replsets/recover_multiple_prepared_transactions_startup.js
@@ -6,154 +6,154 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- let primary = replTest.getPrimary();
+let primary = replTest.getPrimary();
- const dbName = "test";
- const collName = "recover_multiple_prepared_transactions_startup";
- let testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "recover_multiple_prepared_transactions_startup";
+let testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- testDB.runCommand({drop: collName});
- assert.commandWorked(testDB.runCommand({create: collName}));
+testDB.runCommand({drop: collName});
+assert.commandWorked(testDB.runCommand({create: collName}));
- let session = primary.startSession({causalConsistency: false});
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+let session = primary.startSession({causalConsistency: false});
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- let session2 = primary.startSession({causalConsistency: false});
- let sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+let session2 = primary.startSession({causalConsistency: false});
+let sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- assert.commandWorked(sessionColl.insert({_id: 1}));
- assert.commandWorked(sessionColl2.insert({_id: 2}));
-
- jsTestLog("Disable snapshotting on all nodes");
-
- // Disable snapshotting so that future operations do not enter the majority snapshot.
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
-
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
-
- session2.startTransaction();
- assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
- let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
+assert.commandWorked(sessionColl.insert({_id: 1}));
+assert.commandWorked(sessionColl2.insert({_id: 2}));
+
+jsTestLog("Disable snapshotting on all nodes");
+
+// Disable snapshotting so that future operations do not enter the majority snapshot.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "alwaysOn"}));
+
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+
+session2.startTransaction();
+assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
+let prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
- const lsid = session.getSessionId();
- const txnNumber = session.getTxnNumber_forTesting();
-
- const lsid2 = session2.getSessionId();
- const txnNumber2 = session2.getTxnNumber_forTesting();
-
- jsTestLog("Restarting node");
-
- // Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
- // unset on the node following the restart.
- replTest.stop(primary, undefined, {skipValidation: true});
- replTest.start(primary, {}, true);
-
- jsTestLog("Node was restarted");
-
- primary = replTest.getPrimary();
- testDB = primary.getDB(dbName);
-
- session = primary.startSession({causalConsistency: false});
- sessionDB = session.getDatabase(dbName);
-
- session2 = primary.startSession({causalConsistency: false});
- sessionDB2 = session.getDatabase(dbName);
-
- // Force the first session to use the same lsid and txnNumber as from before the restart. This
- // ensures that we're working with the same session and transaction.
- session._serverSession.handle.getId = () => lsid;
- session.setTxnNumber_forTesting(txnNumber);
-
- jsTestLog("Checking that the first transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the first transaction after
- // recovery.
- assert.eq(testDB[collName].find({_id: 1}).toArray(), [{_id: 1}]);
-
- // Make sure that another write on the same document from the first transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the first transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB.runCommand({
- insert: collName,
- documents: [{_id: 3}],
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Committing the first transaction");
-
- // Make sure we can successfully commit the first transaction after recovery.
- let commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
- assert.commandWorked(sessionDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: commitTimestamp,
- txnNumber: NumberLong(txnNumber),
- autocommit: false
- }));
-
- // Force the second session to use the same lsid and txnNumber as from before the restart.
- // This ensures that we're working with the same session and transaction.
- session._serverSession.handle.getId = () => lsid2;
- session.setTxnNumber_forTesting(txnNumber2);
-
- jsTestLog("Checking that the second transaction is properly prepared");
-
- // Make sure that we can't read changes to the document from the second transaction after
- // recovery.
- assert.eq(testDB[collName].find({_id: 2}).toArray(), [{_id: 2}]);
-
- // Make sure that another write on the same document from the second transaction causes a write
- // conflict.
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Make sure that we cannot add other operations to the second transaction since it is prepared.
- assert.commandFailedWithCode(sessionDB2.runCommand({
- insert: collName,
- documents: [{_id: 3}],
- txnNumber: NumberLong(txnNumber2),
- stmtId: NumberInt(2),
- autocommit: false
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- jsTestLog("Aborting the second transaction");
-
- // Make sure we can successfully abort the second transaction after recovery.
- assert.commandWorked(sessionDB2.adminCommand(
- {abortTransaction: 1, txnNumber: NumberLong(txnNumber2), autocommit: false}));
-
- jsTestLog("Attempting to run another transction");
-
- // Make sure that we can run another conflicting transaction after recovery without any
- // problems.
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 3}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 3});
-
- replTest.stopSet();
+const lsid = session.getSessionId();
+const txnNumber = session.getTxnNumber_forTesting();
+
+const lsid2 = session2.getSessionId();
+const txnNumber2 = session2.getTxnNumber_forTesting();
+
+jsTestLog("Restarting node");
+
+// Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
+// unset on the node following the restart.
+replTest.stop(primary, undefined, {skipValidation: true});
+replTest.start(primary, {}, true);
+
+jsTestLog("Node was restarted");
+
+primary = replTest.getPrimary();
+testDB = primary.getDB(dbName);
+
+session = primary.startSession({causalConsistency: false});
+sessionDB = session.getDatabase(dbName);
+
+session2 = primary.startSession({causalConsistency: false});
+sessionDB2 = session.getDatabase(dbName);
+
+// Force the first session to use the same lsid and txnNumber as from before the restart. This
+// ensures that we're working with the same session and transaction.
+session._serverSession.handle.getId = () => lsid;
+session.setTxnNumber_forTesting(txnNumber);
+
+jsTestLog("Checking that the first transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the first transaction after
+// recovery.
+assert.eq(testDB[collName].find({_id: 1}).toArray(), [{_id: 1}]);
+
+// Make sure that another write on the same document from the first transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the first transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB.runCommand({
+ insert: collName,
+ documents: [{_id: 3}],
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Committing the first transaction");
+
+// Make sure we can successfully commit the first transaction after recovery.
+let commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: commitTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false
+}));
+
+// Force the second session to use the same lsid and txnNumber as from before the restart.
+// This ensures that we're working with the same session and transaction.
+session._serverSession.handle.getId = () => lsid2;
+session.setTxnNumber_forTesting(txnNumber2);
+
+jsTestLog("Checking that the second transaction is properly prepared");
+
+// Make sure that we can't read changes to the document from the second transaction after
+// recovery.
+assert.eq(testDB[collName].find({_id: 2}).toArray(), [{_id: 2}]);
+
+// Make sure that another write on the same document from the second transaction causes a write
+// conflict.
+assert.commandFailedWithCode(
+ testDB.runCommand(
+ {update: collName, updates: [{q: {_id: 2}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Make sure that we cannot add other operations to the second transaction since it is prepared.
+assert.commandFailedWithCode(sessionDB2.runCommand({
+ insert: collName,
+ documents: [{_id: 3}],
+ txnNumber: NumberLong(txnNumber2),
+ stmtId: NumberInt(2),
+ autocommit: false
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+jsTestLog("Aborting the second transaction");
+
+// Make sure we can successfully abort the second transaction after recovery.
+assert.commandWorked(sessionDB2.adminCommand(
+ {abortTransaction: 1, txnNumber: NumberLong(txnNumber2), autocommit: false}));
+
+jsTestLog("Attempting to run another transction");
+
+// Make sure that we can run another conflicting transaction after recovery without any
+// problems.
+session.startTransaction();
+assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 3}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 3});
+
+replTest.stopSet();
}());
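
Much of this test is a recipe for reattaching the shell to a transaction that survived a restart: capture the lsid and txnNumber before shutdown, graft them onto a fresh shell session afterwards, then commit or abort as usual. A sketch of just that recipe, with identifiers as in the test above (the getId override is shell-internal plumbing, not a server API):

    // Before the restart: remember the transaction's identity.
    let session = primary.startSession({causalConsistency: false});
    // ... start and prepare a transaction on `session` ...
    const lsid = session.getSessionId();
    const txnNumber = session.getTxnNumber_forTesting();

    // After the restart: reacquire the primary, start a new shell session, and
    // point it at the old server-side session and transaction.
    session = primary.startSession({causalConsistency: false});
    session._serverSession.handle.getId = () => lsid;
    session.setTxnNumber_forTesting(txnNumber);

    // Commands issued through this session now target the recovered prepared
    // transaction, e.g. commitTransaction with an explicit txnNumber.
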
diff --git a/jstests/replsets/recover_prepared_transaction_state.js b/jstests/replsets/recover_prepared_transaction_state.js
index f87d35496c8..1b054718778 100644
--- a/jstests/replsets/recover_prepared_transaction_state.js
+++ b/jstests/replsets/recover_prepared_transaction_state.js
@@ -15,185 +15,182 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/aggregation/extras/utils.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
- const dbName = "test";
- const collName = "recover_prepared_transaction_state_after_rollback";
+const dbName = "test";
+const collName = "recover_prepared_transaction_state_after_rollback";
- const rollbackTest = new RollbackTest(dbName);
- let primary = rollbackTest.getPrimary();
+const rollbackTest = new RollbackTest(dbName);
+let primary = rollbackTest.getPrimary();
- // Create collection we're using beforehand.
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+// Create collection we're using beforehand.
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testDB.runCommand({create: collName}));
- // Start two different sessions on the primary.
- let session1 = primary.startSession({causalConsistency: false});
- let session2 = primary.startSession({causalConsistency: false});
+// Start two different sessions on the primary.
+let session1 = primary.startSession({causalConsistency: false});
+let session2 = primary.startSession({causalConsistency: false});
- // Save both session IDs so we can later start sessions with the same IDs and commit or
- // abort a prepared transaction on them.
- const sessionID1 = session1.getSessionId();
- const sessionID2 = session2.getSessionId();
+// Save both session IDs so we can later start sessions with the same IDs and commit or
+// abort a prepared transaction on them.
+const sessionID1 = session1.getSessionId();
+const sessionID2 = session2.getSessionId();
- let sessionDB1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDB1.getCollection(collName);
+let sessionDB1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDB1.getCollection(collName);
- let sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+let sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- assert.commandWorked(sessionColl1.insert({_id: 1}));
- assert.commandWorked(sessionColl1.insert({_id: 2}));
+assert.commandWorked(sessionColl1.insert({_id: 1}));
+assert.commandWorked(sessionColl1.insert({_id: 2}));
- rollbackTest.awaitLastOpCommitted();
+rollbackTest.awaitLastOpCommitted();
- // Prepare a transaction on the first session whose commit will be rolled back.
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({_id: 3}));
- assert.commandWorked(sessionColl1.update({_id: 1}, {$set: {a: 1}}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session1);
+// Prepare a transaction on the first session whose commit will be rolled back.
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({_id: 3}));
+assert.commandWorked(sessionColl1.update({_id: 1}, {$set: {a: 1}}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session1);
- // Prevent the stable timestamp from moving beyond the following prepared transactions so
- // that when we replay the oplog from the stable timestamp, we correctly recover them.
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
-
- // The following transactions will be prepared before the common point, so they must be in
- // prepare after rollback recovery.
-
- // Prepare another transaction on the second session whose abort will be rolled back.
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 4}));
- assert.commandWorked(sessionColl2.update({_id: 2}, {$set: {b: 2}}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
-
- // Check that we have two transactions in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
-
- // This characterizes the current behavior of fastcount, which is that the two open transactions
- // count toward the value.
- assert.eq(testColl.count(), 4);
-
- // The following commit and abort will be rolled back.
- rollbackTest.transitionToRollbackOperations();
- PrepareHelpers.commitTransaction(session1, prepareTimestamp);
- assert.commandWorked(session2.abortTransaction_forTesting());
-
- // The fastcount should be accurate because there are no open transactions.
- assert.eq(testColl.count(), 3);
-
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- try {
- rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
- }
-
- // Make sure there are two transactions in the transactions table after rollback recovery.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
-
- // Make sure we can only see the first write and cannot see the writes from the prepared
- // transactions or the write that was rolled back.
- arrayEq(sessionColl1.find().toArray(), [{_id: 1}, {_id: 2}]);
- arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}]);
-
- // This check characterizes the current behavior of fastcount after rollback. It will not be
- // correct, but reflects the count at the point where both transactions are not yet committed or
- // aborted (because the operations were not majority committed). The count will eventually be
- // correct once the commit and abort are retried.
- assert.eq(sessionColl1.count(), 4);
- assert.eq(testColl.count(), 4);
-
- // Get the correct primary after the topology changes.
- primary = rollbackTest.getPrimary();
- rollbackTest.awaitReplication();
-
- // Make sure we can successfully commit the first rolled back prepared transaction.
- session1 =
- PrepareHelpers.createSessionWithGivenId(primary, sessionID1, {causalConsistency: false});
- sessionDB1 = session1.getDatabase(dbName);
- // The next transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session1.setTxnNumber_forTesting(0);
- const txnNumber1 = session1.getTxnNumber_forTesting();
-
- // Make sure we cannot add any operations to a prepared transaction.
- assert.commandFailedWithCode(sessionDB1.runCommand({
- insert: collName,
- txnNumber: NumberLong(txnNumber1),
- documents: [{_id: 10}],
- autocommit: false,
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- // Make sure that writing to a document that was updated in the first prepared transaction
- // causes a write conflict.
- assert.commandFailedWithCode(
- sessionDB1.runCommand(
- {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
- assert.commandWorked(sessionDB1.adminCommand({
- commitTransaction: 1,
- commitTimestamp: commitTimestamp,
- txnNumber: NumberLong(txnNumber1),
- autocommit: false,
- }));
- // Retry the commitTransaction command after rollback.
- assert.commandWorked(sessionDB1.adminCommand({
- commitTransaction: 1,
- commitTimestamp: commitTimestamp,
- txnNumber: NumberLong(txnNumber1),
- autocommit: false,
- }));
-
- // Make sure we can successfully abort the second recovered prepared transaction.
- session2 =
- PrepareHelpers.createSessionWithGivenId(primary, sessionID2, {causalConsistency: false});
- sessionDB2 = session2.getDatabase(dbName);
- // The next transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session2.setTxnNumber_forTesting(0);
- const txnNumber2 = session2.getTxnNumber_forTesting();
-
- // Make sure we cannot add any operations to a prepared transaction.
- assert.commandFailedWithCode(sessionDB2.runCommand({
- insert: collName,
- txnNumber: NumberLong(txnNumber2),
- documents: [{_id: 10}],
- autocommit: false,
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- // Make sure that writing to a document that was updated in the second prepared transaction
- // causes a write conflict.
- assert.commandFailedWithCode(
- sessionDB2.runCommand(
- {update: collName, updates: [{q: {_id: 2}, u: {$set: {b: 3}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- assert.commandWorked(sessionDB2.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber2),
- autocommit: false,
- }));
-
- rollbackTest.awaitReplication();
-
- // Make sure we can see the result of the committed prepared transaction and cannot see the
- // write from the aborted transaction.
- arrayEq(testColl.find().toArray(), [{_id: 1, a: 1}, {_id: 2}, {_id: 3}]);
- assert.eq(testColl.count(), 3);
-
- rollbackTest.stop();
+// Prevent the stable timestamp from moving beyond the following prepared transactions so
+// that when we replay the oplog from the stable timestamp, we correctly recover them.
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+
+// The following transactions will be prepared before the common point, so they must be in
+// prepare after rollback recovery.
+
+// Prepare another transaction on the second session whose abort will be rolled back.
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 4}));
+assert.commandWorked(sessionColl2.update({_id: 2}, {$set: {b: 2}}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
+
+// Check that we have two transactions in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+// This characterizes the current behavior of fastcount, which is that the two open transactions
+// count toward the value.
+assert.eq(testColl.count(), 4);
+
+// The following commit and abort will be rolled back.
+rollbackTest.transitionToRollbackOperations();
+PrepareHelpers.commitTransaction(session1, prepareTimestamp);
+assert.commandWorked(session2.abortTransaction_forTesting());
+
+// The fastcount should be accurate because there are no open transactions.
+assert.eq(testColl.count(), 3);
+
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+try {
+ rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
+}
+
+// Make sure there are two transactions in the transactions table after rollback recovery.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+
+// Make sure we can only see the first write and cannot see the writes from the prepared
+// transactions or the write that was rolled back.
+arrayEq(sessionColl1.find().toArray(), [{_id: 1}, {_id: 2}]);
+arrayEq(testColl.find().toArray(), [{_id: 1}, {_id: 2}]);
+
+// This check characterizes the current behavior of fastcount after rollback. It will not be
+// correct, but reflects the count at the point where both transactions are not yet committed or
+// aborted (because the operations were not majority committed). The count will eventually be
+// correct once the commit and abort are retried.
+assert.eq(sessionColl1.count(), 4);
+assert.eq(testColl.count(), 4);
+
+// Get the correct primary after the topology changes.
+primary = rollbackTest.getPrimary();
+rollbackTest.awaitReplication();
+
+// Make sure we can successfully commit the first rolled back prepared transaction.
+session1 = PrepareHelpers.createSessionWithGivenId(primary, sessionID1, {causalConsistency: false});
+sessionDB1 = session1.getDatabase(dbName);
+// The next transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session1.setTxnNumber_forTesting(0);
+const txnNumber1 = session1.getTxnNumber_forTesting();
+
+// Make sure we cannot add any operations to a prepared transaction.
+assert.commandFailedWithCode(sessionDB1.runCommand({
+ insert: collName,
+ txnNumber: NumberLong(txnNumber1),
+ documents: [{_id: 10}],
+ autocommit: false,
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+// Make sure that writing to a document that was updated in the first prepared transaction
+// causes a write conflict.
+assert.commandFailedWithCode(
+ sessionDB1.runCommand(
+ {update: collName, updates: [{q: {_id: 1}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+const commitTimestamp = Timestamp(prepareTimestamp.getTime(), prepareTimestamp.getInc() + 1);
+assert.commandWorked(sessionDB1.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: commitTimestamp,
+ txnNumber: NumberLong(txnNumber1),
+ autocommit: false,
+}));
+// Retry the commitTransaction command after rollback.
+assert.commandWorked(sessionDB1.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: commitTimestamp,
+ txnNumber: NumberLong(txnNumber1),
+ autocommit: false,
+}));
+
+// Make sure we can successfully abort the second recovered prepared transaction.
+session2 = PrepareHelpers.createSessionWithGivenId(primary, sessionID2, {causalConsistency: false});
+sessionDB2 = session2.getDatabase(dbName);
+// The next transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session2.setTxnNumber_forTesting(0);
+const txnNumber2 = session2.getTxnNumber_forTesting();
+
+// Make sure we cannot add any operations to a prepared transaction.
+assert.commandFailedWithCode(sessionDB2.runCommand({
+ insert: collName,
+ txnNumber: NumberLong(txnNumber2),
+ documents: [{_id: 10}],
+ autocommit: false,
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+// Make sure that writing to a document that was updated in the second prepared transaction
+// causes a write conflict.
+assert.commandFailedWithCode(
+ sessionDB2.runCommand(
+ {update: collName, updates: [{q: {_id: 2}, u: {$set: {b: 3}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+assert.commandWorked(sessionDB2.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber2),
+ autocommit: false,
+}));
+
+rollbackTest.awaitReplication();
+
+// Make sure we can see the result of the committed prepared transaction and cannot see the
+// write from the aborted transaction.
+arrayEq(testColl.find().toArray(), [{_id: 1, a: 1}, {_id: 2}, {_id: 3}]);
+assert.eq(testColl.count(), 3);
+
+rollbackTest.stop();
}());
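
One smaller idiom recurs in the rollback and startup tests above: proving that a document is still locked by a prepared transaction by issuing a conflicting update bounded with maxTimeMS and expecting a timeout instead of a result. A sketch, assuming `testDB` and `collName` as above and a prepared transaction that updated {_id: 1}:

    // The update blocks behind the prepared transaction's lock on {_id: 1};
    // bounding it with maxTimeMS turns "blocked" into an observable error.
    assert.commandFailedWithCode(
        testDB.runCommand({
            update: collName,
            updates: [{q: {_id: 1}, u: {$set: {a: 2}}}],
            maxTimeMS: 5 * 1000  // give up after five seconds
        }),
        ErrorCodes.MaxTimeMSExpired);
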
diff --git a/jstests/replsets/recover_prepared_transactions_startup_secondary_application.js b/jstests/replsets/recover_prepared_transactions_startup_secondary_application.js
index 38509a50898..df484d4c347 100644
--- a/jstests/replsets/recover_prepared_transactions_startup_secondary_application.js
+++ b/jstests/replsets/recover_prepared_transactions_startup_secondary_application.js
@@ -7,114 +7,114 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 2});
- const nodes = replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 2});
+const nodes = replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
+const primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
- const dbName = "test";
- const collName = "recover_prepared_transactions_startup_secondary_application";
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "recover_prepared_transactions_startup_secondary_application";
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testDB.runCommand({create: collName}));
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- const session2 = primary.startSession();
- const sessionDB2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDB2.getCollection(collName);
+const session2 = primary.startSession();
+const sessionDB2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDB2.getCollection(collName);
- assert.commandWorked(sessionColl.insert({_id: 1}));
- assert.commandWorked(sessionColl2.insert({_id: 2}));
+assert.commandWorked(sessionColl.insert({_id: 1}));
+assert.commandWorked(sessionColl2.insert({_id: 2}));
- replTest.awaitReplication();
+replTest.awaitReplication();
- jsTestLog("Disable snapshotting on all nodes");
+jsTestLog("Disable snapshotting on all nodes");
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- nodes.forEach(node => assert.commandWorked(node.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+nodes.forEach(node => assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
- jsTestLog("Prepared a transaction at " + prepareTimestamp);
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {_id: 1, a: 1}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});
+jsTestLog("Prepared a transaction at " + prepareTimestamp);
- session2.startTransaction();
- assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
- jsTestLog("Prepared another transaction at " + prepareTimestamp2);
+session2.startTransaction();
+assert.commandWorked(sessionColl2.update({_id: 2}, {_id: 2, a: 1}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(session2, {w: 1});
+jsTestLog("Prepared another transaction at " + prepareTimestamp2);
- const lsid = session.getSessionId();
- const txnNumber = session.getTxnNumber_forTesting();
+const lsid = session.getSessionId();
+const txnNumber = session.getTxnNumber_forTesting();
- const lsid2 = session2.getSessionId();
- const txnNumber2 = session2.getTxnNumber_forTesting();
+const lsid2 = session2.getSessionId();
+const txnNumber2 = session2.getTxnNumber_forTesting();
- jsTestLog("Restarting node");
+jsTestLog("Restarting node");
- // Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
- // unset on the node following the restart.
- replTest.stop(secondary, undefined, {skipValidation: true});
- secondary = replTest.start(secondary, {}, true);
+// Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
+// unset on the node following the restart.
+replTest.stop(secondary, undefined, {skipValidation: true});
+secondary = replTest.start(secondary, {}, true);
- jsTestLog("Secondary was restarted");
+jsTestLog("Secondary was restarted");
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "off"}));
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "disableSnapshotting", mode: "off"}));
- // It's illegal to commit a prepared transaction before its prepare oplog entry has been
- // majority committed. So wait for prepare oplog entry to be majority committed before issuing
- // the commitTransaction command.
- PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp2);
+// It's illegal to commit a prepared transaction before its prepare oplog entry has been
+// majority committed. So wait for prepare oplog entry to be majority committed before issuing
+// the commitTransaction command.
+PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp2);
- // Wait for the node to complete recovery before trying to read from it.
- replTest.awaitSecondaryNodes();
- secondary.setSlaveOk();
+// Wait for the node to complete recovery before trying to read from it.
+replTest.awaitSecondaryNodes();
+secondary.setSlaveOk();
- jsTestLog("Checking that the first transaction is properly prepared");
+jsTestLog("Checking that the first transaction is properly prepared");
- // Make sure that we can't read changes to the document from either transaction after recovery.
- const secondaryTestColl = secondary.getDB(dbName).getCollection(collName);
- assert.eq(secondaryTestColl.find({_id: 1}).toArray(), [{_id: 1}]);
- assert.eq(secondaryTestColl.find({_id: 2}).toArray(), [{_id: 2}]);
+// Make sure that we can't read changes to the document from either transaction after recovery.
+const secondaryTestColl = secondary.getDB(dbName).getCollection(collName);
+assert.eq(secondaryTestColl.find({_id: 1}).toArray(), [{_id: 1}]);
+assert.eq(secondaryTestColl.find({_id: 2}).toArray(), [{_id: 2}]);
- jsTestLog("Committing the first transaction");
+jsTestLog("Committing the first transaction");
- // Make sure we can successfully commit the first transaction after recovery and that we can see
- // all its changes when we read from the secondary.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- replTest.awaitReplication();
- assert.eq(secondaryTestColl.find().sort({_id: 1}).toArray(), [{_id: 1, a: 1}, {_id: 2}]);
+// Make sure we can successfully commit the first transaction after recovery and that we can see
+// all its changes when we read from the secondary.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+replTest.awaitReplication();
+assert.eq(secondaryTestColl.find().sort({_id: 1}).toArray(), [{_id: 1, a: 1}, {_id: 2}]);
- jsTestLog("Aborting the second transaction");
+jsTestLog("Aborting the second transaction");
- // Make sure we can successfully abort the second transaction after recovery and that we can't
- // see any of its operations when we read from the secondary.
- assert.commandWorked(session2.abortTransaction_forTesting());
- replTest.awaitReplication();
- assert.eq(secondaryTestColl.find().sort({_id: 1}).toArray(), [{_id: 1, a: 1}, {_id: 2}]);
+// Make sure we can successfully abort the second transaction after recovery and that we can't
+// see any of its operations when we read from the secondary.
+assert.commandWorked(session2.abortTransaction_forTesting());
+replTest.awaitReplication();
+assert.eq(secondaryTestColl.find().sort({_id: 1}).toArray(), [{_id: 1, a: 1}, {_id: 2}]);
- jsTestLog("Attempting to run another transction");
+jsTestLog("Attempting to run another transction");
- // Make sure that we can run another conflicting transaction after recovery without any
- // problems and that we can see its changes when we read from the secondary.
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 3}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.eq(testColl.findOne({_id: 1}), {_id: 1, a: 3});
- replTest.awaitReplication();
- assert.eq(secondaryTestColl.findOne({_id: 1}), {_id: 1, a: 3});
+// Make sure that we can run another conflicting transaction after recovery without any
+// problems and that we can see its changes when we read from the secondary.
+session.startTransaction();
+assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 3}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.eq(testColl.findOne({_id: 1}), {_id: 1, a: 3});
+replTest.awaitReplication();
+assert.eq(secondaryTestColl.findOne({_id: 1}), {_id: 1, a: 3});
- replTest.stopSet();
+replTest.stopSet();
}());
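
The comment in this test states the ordering invariant outright: a prepared transaction may not be committed until its prepare oplog entry is majority-committed. Because these tests prepare with {w: 1} so the command returns early, the safe sequence looks like this sketch:

    // Prepare with w: 1; the command returns before the prepare oplog entry is
    // majority-committed, which is what lets the test restart a node first.
    const prepareTimestamp = PrepareHelpers.prepareTransaction(session, {w: 1});

    // Block until the prepare entry is majority-committed across the set;
    // issuing commitTransaction any earlier is illegal.
    PrepareHelpers.awaitMajorityCommitted(replTest, prepareTimestamp);

    assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
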
diff --git a/jstests/replsets/recover_prepared_txn_with_multikey_write.js b/jstests/replsets/recover_prepared_txn_with_multikey_write.js
index 5a71d9054c0..b898fce07ba 100644
--- a/jstests/replsets/recover_prepared_txn_with_multikey_write.js
+++ b/jstests/replsets/recover_prepared_txn_with_multikey_write.js
@@ -5,39 +5,39 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- const rst = new ReplSetTest({
- nodes: [
- {},
- {
- // Disallow elections on secondary.
- rsConfig: {
- priority: 0,
- votes: 0,
- }
+const rst = new ReplSetTest({
+ nodes: [
+ {},
+ {
+ // Disallow elections on secondary.
+ rsConfig: {
+ priority: 0,
+ votes: 0,
}
- ]
- });
+ }
+ ]
+});
- rst.startSet();
- rst.initiate();
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
+const primary = rst.getPrimary();
- const session = primary.getDB("test").getMongo().startSession();
- const sessionDB = session.getDatabase("test");
- const sessionColl = sessionDB.getCollection("coll");
+const session = primary.getDB("test").getMongo().startSession();
+const sessionDB = session.getDatabase("test");
+const sessionColl = sessionDB.getCollection("coll");
- // Create an index that will later be made multikey.
- sessionColl.createIndex({x: 1});
- session.startTransaction();
+// Create an index that will later be made multikey.
+sessionColl.createIndex({x: 1});
+session.startTransaction();
- // Make the index multikey.
- sessionColl.insert({x: [1, 2, 3]});
- assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1}));
+// Make the index multikey.
+sessionColl.insert({x: [1, 2, 3]});
+assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1}));
- // Do an unclean shutdown so we don't force a checkpoint, and then restart.
- rst.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- rst.restart(0);
+// Do an unclean shutdown so we don't force a checkpoint, and then restart.
+rst.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
+rst.restart(0);
- rst.stopSet();
+rst.stopSet();
}());
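
The crux of this short test is easy to miss: the array insert flips the {x: 1} index to multikey inside the still-prepared transaction, and the SIGKILL skips the shutdown checkpoint, so startup recovery must reconstruct both the prepared transaction and the multikey flag from the oplog. Annotated, the two key steps are:

    // An array value makes the {x: 1} index multikey, and it happens inside an
    // open transaction that is then prepared but never committed.
    sessionColl.insert({x: [1, 2, 3]});
    assert.commandWorked(sessionDB.adminCommand({prepareTransaction: 1}));

    // Signal 9 (SIGKILL) prevents a clean-shutdown checkpoint; allowedExitCode
    // tells the harness the killed process's exit is expected. The restart then
    // has to recover the prepared transaction and the multikey change.
    rst.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
    rst.restart(0);
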
diff --git a/jstests/replsets/recovery_after_clean_shutdown_but_not_all_writes_in_snapshot.js b/jstests/replsets/recovery_after_clean_shutdown_but_not_all_writes_in_snapshot.js
index 45a005e255e..77700523439 100644
--- a/jstests/replsets/recovery_after_clean_shutdown_but_not_all_writes_in_snapshot.js
+++ b/jstests/replsets/recovery_after_clean_shutdown_but_not_all_writes_in_snapshot.js
@@ -5,75 +5,78 @@
* @tags: [requires_persistence, requires_replication]
*/
(function() {
- "use strict";
+"use strict";
- const rst = new ReplSetTest({
- name: "recoveryAfterCleanShutdown",
- nodes: 2,
- nodeOptions:
- {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
- });
- const nodes = rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({
+ name: "recoveryAfterCleanShutdown",
+ nodes: 2,
+ nodeOptions: {setParameter: {logComponentVerbosity: tojsononeline({storage: {recovery: 2}})}}
+});
+const nodes = rst.startSet();
+rst.initiate();
- const dbName = "recovery_clean_shutdown";
- let primaryDB = rst.getPrimary().getDB(dbName);
- const wMajority = {writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}};
+const dbName = "recovery_clean_shutdown";
+let primaryDB = rst.getPrimary().getDB(dbName);
+const wMajority = {
+ writeConcern: {w: "majority", wtimeout: ReplSetTest.kDefaultTimeoutMS}
+};
- // Create a collection that will have all of its writes in the stable checkpoint.
- const collAllStableWrites = "allWritesInStableCheckpoint";
- assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "dan"}, wMajority));
- assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "judah"}, wMajority));
- assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "vessy"}, wMajority));
- assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "kyle"}, wMajority));
+// Create a collection that will have all of its writes in the stable checkpoint.
+const collAllStableWrites = "allWritesInStableCheckpoint";
+assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "dan"}, wMajority));
+assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "judah"}, wMajority));
+assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "vessy"}, wMajority));
+assert.commandWorked(primaryDB[collAllStableWrites].insert({_id: "kyle"}, wMajority));
- // Set up a collection with some writes that make it into the stable checkpoint.
- const collSomeStableWrites = "someWritesInStableCheckpoint";
- assert.commandWorked(primaryDB[collSomeStableWrites].insert({_id: "erjon"}, wMajority));
- assert.commandWorked(primaryDB[collSomeStableWrites].insert({_id: "jungsoo"}, wMajority));
+// Set up a collection with some writes that make it into the stable checkpoint.
+const collSomeStableWrites = "someWritesInStableCheckpoint";
+assert.commandWorked(primaryDB[collSomeStableWrites].insert({_id: "erjon"}, wMajority));
+assert.commandWorked(primaryDB[collSomeStableWrites].insert({_id: "jungsoo"}, wMajority));
-    // Set up a collection whose creation is in the stable checkpoint, but that will have no
-    // stable writes.
- const collNoStableWrites = "noWritesInStableCheckpoint";
- assert.commandWorked(primaryDB[collNoStableWrites].runCommand("create", wMajority));
+// Set up a collection whose creation is in the stable checkpoint, but that will have no
+// stable writes.
+const collNoStableWrites = "noWritesInStableCheckpoint";
+assert.commandWorked(primaryDB[collNoStableWrites].runCommand("create", wMajority));
- // Wait for all oplog entries to enter the stable checkpoint on all secondaries.
- rst.awaitLastOpCommitted();
+// Wait for all oplog entries to enter the stable checkpoint on all secondaries.
+rst.awaitLastOpCommitted();
- // Disable snapshotting on all members of the replica set so that further operations do not
- // enter the majority snapshot.
- nodes.forEach(node => assert.commandWorked(node.adminCommand(
- {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
- const w1 = {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}};
+// Disable snapshotting on all members of the replica set so that further operations do not
+// enter the majority snapshot.
+nodes.forEach(node => assert.commandWorked(node.adminCommand(
+ {configureFailPoint: "disableSnapshotting", mode: "alwaysOn"})));
+const w1 = {
+ writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}
+};
- // Set up a collection whose creation is not in the stable checkpoint.
- const collNoStableCreation = "creationNotInStableCheckpoint";
- assert.commandWorked(primaryDB[collNoStableCreation].runCommand("create", w1));
+// Set up a collection whose creation is not in the stable checkpoint.
+const collNoStableCreation = "creationNotInStableCheckpoint";
+assert.commandWorked(primaryDB[collNoStableCreation].runCommand("create", w1));
- // Perform writes on collections that replicate to each node but do not enter the majority
- // snapshot. These commands will be replayed during replication recovery during restart.
- [collSomeStableWrites, collNoStableWrites, collNoStableCreation].forEach(
- coll => assert.commandWorked(
- primaryDB[coll].insert({_id: "insertedAfterSnapshottingDisabled"}, w1)));
- rst.awaitReplication();
+// Perform writes on collections that replicate to each node but do not enter the majority
+// snapshot. These commands will be replayed during replication recovery during restart.
+[collSomeStableWrites, collNoStableWrites, collNoStableCreation].forEach(
+ coll => assert.commandWorked(
+ primaryDB[coll].insert({_id: "insertedAfterSnapshottingDisabled"}, w1)));
+rst.awaitReplication();
- jsTestLog("Checking collection counts after snapshotting has been disabled");
- rst.checkCollectionCounts();
+jsTestLog("Checking collection counts after snapshotting has been disabled");
+rst.checkCollectionCounts();
- // Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
- // unset on each node following the restart.
- nodes.forEach(node => rst.restart(node));
- rst.awaitNodesAgreeOnPrimary();
- primaryDB = rst.getPrimary().getDB(dbName);
+// Perform a clean shutdown and restart. Note that the 'disableSnapshotting' failpoint will be
+// unset on each node following the restart.
+nodes.forEach(node => rst.restart(node));
+rst.awaitNodesAgreeOnPrimary();
+primaryDB = rst.getPrimary().getDB(dbName);
- // Perform a majority write to ensure that both nodes agree on the majority commit point.
- const collCreatedAfterRestart = "createdAfterRestart";
- assert.commandWorked(
-        primaryDB[collCreatedAfterRestart].insert({_id: "insertedAfterRestart"}, wMajority));
+// Perform a majority write to ensure that both nodes agree on the majority commit point.
+const collCreatedAfterRestart = "createdAfterRestart";
+assert.commandWorked(
+    primaryDB[collCreatedAfterRestart].insert({_id: "insertedAfterRestart"}, wMajority));
- // Fast metadata count should be correct after restart in the face of a clean shutdown.
- jsTestLog("Checking collection counts after clean restart of all nodes");
- rst.checkCollectionCounts();
+// Fast metadata count should be correct after restart in the face of a clean shutdown.
+jsTestLog("Checking collection counts after clean restart of all nodes");
+rst.checkCollectionCounts();
- rst.stopSet();
+rst.stopSet();
}());
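
The per-node failpoint toggling above is a common idiom; a small reusable helper, sketched assuming "nodes" is the array returned by startSet():

// Turn a named failpoint on or off ("alwaysOn"/"off") on every member.
function setFailPointOnAllNodes(nodes, failPointName, mode) {
    nodes.forEach(node => assert.commandWorked(
        node.adminCommand({configureFailPoint: failPointName, mode: mode})));
}
// Example: setFailPointOnAllNodes(nodes, "disableSnapshotting", "alwaysOn");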
diff --git a/jstests/replsets/recovery_preserves_active_txns.js b/jstests/replsets/recovery_preserves_active_txns.js
index 5896a1e01fc..005286cf152 100644
--- a/jstests/replsets/recovery_preserves_active_txns.js
+++ b/jstests/replsets/recovery_preserves_active_txns.js
@@ -11,83 +11,82 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
-
- // A new replica set for both the commit and abort tests to ensure the same clean state.
- function doTest(commitOrAbort) {
- const replSet = new ReplSetTest({
- // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
- nodeOptions:
- {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
- nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
- });
-
- replSet.startSet(PrepareHelpers.replSetStartSetOptions);
- replSet.initiate();
- const primary = replSet.getPrimary();
- const primaryOplog = primary.getDB("local").oplog.rs;
- assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-
- const coll = primary.getDB("test").test;
- assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
-
- jsTestLog("Prepare a transaction");
-
- const session = primary.startSession();
- session.startTransaction();
- assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const oldestRequiredTimestampForCrashRecovery =
- PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
- assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
-
- jsTestLog("Insert documents until oplog exceeds oplogSize");
-
- // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
- PrepareHelpers.growOplogPastMaxSize(replSet);
-
- // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
- var secondary = replSet.getSecondary();
- function checkSecondaryOplog() {
- const secondaryOplog = secondary.getDB("local").oplog.rs;
- assert.soon(() => {
- return secondaryOplog.dataSize() >= PrepareHelpers.oplogSizeBytes;
- }, "waiting for secondary oplog to grow", ReplSetTest.kDefaultTimeoutMS);
- const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
- assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
- }
- checkSecondaryOplog();
-
- jsTestLog("Restart the secondary");
-
- const secondaryId = replSet.getSecondary().nodeId;
- // Validation can't complete while the active transaction holds a lock.
- replSet.stop(secondaryId, undefined, {skipValidation: true});
- secondary = replSet.start(secondaryId, {}, true /* restart */);
-
- jsTestLog("Restarted");
-
- replSet.awaitSecondaryNodes();
- checkSecondaryOplog();
-
- if (commitOrAbort === "commit") {
- jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- } else if (commitOrAbort === "abort") {
- jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
- assert.commandWorked(session.abortTransaction_forTesting());
- } else {
- throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
- }
-
- PrepareHelpers.awaitOplogTruncation(replSet);
-
-        // ReplSetTest reacts poorly to restarting a node; end it manually.
- replSet.stopSet(true, false, {});
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+
+// A new replica set for both the commit and abort tests to ensure the same clean state.
+function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiate();
+ const primary = replSet.getPrimary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ const oldestRequiredTimestampForCrashRecovery =
+ PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
+ assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ // Oplog grew past maxSize, and it includes the oldest active transaction's entry.
+ var secondary = replSet.getSecondary();
+ function checkSecondaryOplog() {
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.soon(() => {
+ return secondaryOplog.dataSize() >= PrepareHelpers.oplogSizeBytes;
+ }, "waiting for secondary oplog to grow", ReplSetTest.kDefaultTimeoutMS);
+ const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
}
- doTest("commit");
- doTest("abort");
+ checkSecondaryOplog();
+
+ jsTestLog("Restart the secondary");
+
+ const secondaryId = replSet.getSecondary().nodeId;
+ // Validation can't complete while the active transaction holds a lock.
+ replSet.stop(secondaryId, undefined, {skipValidation: true});
+ secondary = replSet.start(secondaryId, {}, true /* restart */);
+
+ jsTestLog("Restarted");
+
+ replSet.awaitSecondaryNodes();
+ checkSecondaryOplog();
+
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ assert.commandWorked(session.abortTransaction_forTesting());
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
+ }
+
+ PrepareHelpers.awaitOplogTruncation(replSet);
+
+    // ReplSetTest reacts poorly to restarting a node; end it manually.
+ replSet.stopSet(true, false, {});
+}
+doTest("commit");
+doTest("abort");
})();
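
Waiting for the secondary's oplog to pass its configured cap relies on the assert.soon polling idiom; sketched in isolation, assuming "secondary" is a live connection and PrepareHelpers is loaded:

const secondaryOplog = secondary.getDB("local").oplog.rs;
// Poll until the oplog's data size crosses the cap, or time out.
assert.soon(() => secondaryOplog.dataSize() >= PrepareHelpers.oplogSizeBytes,
            "timed out waiting for the secondary oplog to grow",
            ReplSetTest.kDefaultTimeoutMS);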
diff --git a/jstests/replsets/refresh_sessions_rs.js b/jstests/replsets/refresh_sessions_rs.js
index 4539e667d07..24d553c2df7 100644
--- a/jstests/replsets/refresh_sessions_rs.js
+++ b/jstests/replsets/refresh_sessions_rs.js
@@ -1,82 +1,80 @@
(function() {
- "use strict";
-
- // This test makes assertions about the number of logical session records.
- TestData.disableImplicitSessions = true;
-
- var refresh = {refreshLogicalSessionCacheNow: 1};
- var startSession = {startSession: 1};
-
- // Start up a replica set.
- var dbName = "config";
-
- var replTest = new ReplSetTest({name: 'refresh', nodes: 3});
- var nodes = replTest.startSet();
-
- replTest.initiate();
- var primary = replTest.getPrimary();
-
- replTest.awaitSecondaryNodes();
- var server2 = replTest._slaves[0];
- var server3 = replTest._slaves[1];
-
- var db1 = primary.getDB(dbName);
- var db2 = server2.getDB(dbName);
- var db3 = server3.getDB(dbName);
-
- var res;
-
- // The primary needs to create the sessions collection so that the secondaries can act upon it.
- // This is done by an initial refresh of the primary.
- res = db1.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
- replTest.awaitReplication();
-
- // Trigger an initial refresh on secondaries as a sanity check.
- res = db2.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
- res = db3.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- // Connect to the primary and start a session.
-    res = db1.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
-
- // That session should not be in db.system.sessions yet.
- assert.eq(db1.system.sessions.count(), 0, "should not have session records yet");
-
- // Connect to each replica set member and start a session.
- res = db2.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- res = db3.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
-
- // Connect to a secondary and trigger a refresh.
- res = db2.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- // Connect to the primary. The sessions collection here should have one record for the session
- // on the secondary.
- assert.eq(db1.system.sessions.count(), 1, "failed to refresh on the secondary");
-
- // Trigger a refresh on the primary. The sessions collection should now contain two records.
- res = db1.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
- assert.eq(
- db1.system.sessions.count(), 2, "should have two local session records after refresh");
-
- // Trigger another refresh on all members.
- res = db2.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
- res = db3.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
- res = db1.runCommand(refresh);
- assert.commandWorked(res, "failed to refresh");
-
- // The sessions collection on the primary should now contain all records.
- assert.eq(
- db1.system.sessions.count(), 3, "should have three local session records after refresh");
-
- // Stop the test.
- replTest.stopSet();
+"use strict";
+
+// This test makes assertions about the number of logical session records.
+TestData.disableImplicitSessions = true;
+
+var refresh = {refreshLogicalSessionCacheNow: 1};
+var startSession = {startSession: 1};
+
+// Start up a replica set.
+var dbName = "config";
+
+var replTest = new ReplSetTest({name: 'refresh', nodes: 3});
+var nodes = replTest.startSet();
+
+replTest.initiate();
+var primary = replTest.getPrimary();
+
+replTest.awaitSecondaryNodes();
+var server2 = replTest._slaves[0];
+var server3 = replTest._slaves[1];
+
+var db1 = primary.getDB(dbName);
+var db2 = server2.getDB(dbName);
+var db3 = server3.getDB(dbName);
+
+var res;
+
+// The primary needs to create the sessions collection so that the secondaries can act upon it.
+// This is done by an initial refresh of the primary.
+res = db1.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+replTest.awaitReplication();
+
+// Trigger an initial refresh on secondaries as a sanity check.
+res = db2.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+res = db3.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+// Connect to the primary and start a session.
+res = db1.runCommand(startSession);
+assert.commandWorked(res, "unable to start session");
+
+// That session should not be in db.system.sessions yet.
+assert.eq(db1.system.sessions.count(), 0, "should not have session records yet");
+
+// Connect to each replica set member and start a session.
+res = db2.runCommand(startSession);
+assert.commandWorked(res, "unable to start session");
+res = db3.runCommand(startSession);
+assert.commandWorked(res, "unable to start session");
+
+// Connect to a secondary and trigger a refresh.
+res = db2.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+// Connect to the primary. The sessions collection here should have one record for the session
+// on the secondary.
+assert.eq(db1.system.sessions.count(), 1, "failed to refresh on the secondary");
+
+// Trigger a refresh on the primary. The sessions collection should now contain two records.
+res = db1.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+assert.eq(db1.system.sessions.count(), 2, "should have two local session records after refresh");
+
+// Trigger another refresh on all members.
+res = db2.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+res = db3.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+res = db1.runCommand(refresh);
+assert.commandWorked(res, "failed to refresh");
+
+// The sessions collection on the primary should now contain all records.
+assert.eq(db1.system.sessions.count(), 3, "should have three local session records after refresh");
+
+// Stop the test.
+replTest.stopSet();
})();
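
The refresh-then-count pattern this test repeats can be condensed into a helper; a sketch, assuming "conn" is a connection to any set member:

function refreshAndCountSessions(conn) {
    const configDB = conn.getDB("config");
    // Flush this member's logical session cache into config.system.sessions.
    assert.commandWorked(configDB.runCommand({refreshLogicalSessionCacheNow: 1}));
    return configDB.system.sessions.count();
}
// Example: assert.eq(refreshAndCountSessions(primary), 3);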
diff --git a/jstests/replsets/rename_across_dbs.js b/jstests/replsets/rename_across_dbs.js
index 7976eb65103..1bbb2249581 100644
--- a/jstests/replsets/rename_across_dbs.js
+++ b/jstests/replsets/rename_across_dbs.js
@@ -1,7 +1,7 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- new RenameAcrossDatabasesTest().run();
+new RenameAcrossDatabasesTest().run();
}());
diff --git a/jstests/replsets/rename_across_dbs_drop_target.js b/jstests/replsets/rename_across_dbs_drop_target.js
index 8639f9caeff..33916a1cb09 100644
--- a/jstests/replsets/rename_across_dbs_drop_target.js
+++ b/jstests/replsets/rename_across_dbs_drop_target.js
@@ -1,8 +1,10 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/rename_across_dbs.js");
+load("jstests/replsets/libs/rename_across_dbs.js");
- const options = {dropTarget: true};
- new RenameAcrossDatabasesTest(options).run();
+const options = {
+ dropTarget: true
+};
+new RenameAcrossDatabasesTest(options).run();
}());
diff --git a/jstests/replsets/rename_collection_between_unrepl_and_repl.js b/jstests/replsets/rename_collection_between_unrepl_and_repl.js
index 07b318f7d76..e77da8a1b9b 100644
--- a/jstests/replsets/rename_collection_between_unrepl_and_repl.js
+++ b/jstests/replsets/rename_collection_between_unrepl_and_repl.js
@@ -6,39 +6,37 @@
*/
(function() {
- "use strict";
+"use strict";
- const name = "rename_collection_between_unrepl_and_repl";
- const rst = new ReplSetTest({"name": name, "nodes": 1});
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
+const name = "rename_collection_between_unrepl_and_repl";
+const rst = new ReplSetTest({"name": name, "nodes": 1});
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
- /**
- * Part 1: Attempt to rename from a replicated to an unreplicated namespace.
- */
- let sourceNs = "somedb.replicated";
- let targetNs = "local.unreplicated";
+/**
+ * Part 1: Attempt to rename from a replicated to an unreplicated namespace.
+ */
+let sourceNs = "somedb.replicated";
+let targetNs = "local.unreplicated";
- // Ensure that the source collection exists.
- assert.commandWorked(primary.getCollection(sourceNs).insert({"fromRepl": "toUnrepl"}));
+// Ensure that the source collection exists.
+assert.commandWorked(primary.getCollection(sourceNs).insert({"fromRepl": "toUnrepl"}));
- assert.commandFailedWithCode(
- primary.adminCommand({"renameCollection": sourceNs, "to": targetNs}),
- ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(primary.adminCommand({"renameCollection": sourceNs, "to": targetNs}),
+ ErrorCodes.IllegalOperation);
- /**
- * Part 2: Attempt to rename from an unreplicated to a replicated namespace.
- */
- sourceNs = "local.alsoUnreplicated";
- targetNs = "somedb.alsoReplicated";
+/**
+ * Part 2: Attempt to rename from an unreplicated to a replicated namespace.
+ */
+sourceNs = "local.alsoUnreplicated";
+targetNs = "somedb.alsoReplicated";
- // Ensure that the source collection exists.
- assert.commandWorked(primary.getCollection(sourceNs).insert({"fromUnrepl": "toRepl"}));
+// Ensure that the source collection exists.
+assert.commandWorked(primary.getCollection(sourceNs).insert({"fromUnrepl": "toRepl"}));
- assert.commandFailedWithCode(
- primary.adminCommand({"renameCollection": sourceNs, "to": targetNs}),
- ErrorCodes.IllegalOperation);
+assert.commandFailedWithCode(primary.adminCommand({"renameCollection": sourceNs, "to": targetNs}),
+ ErrorCodes.IllegalOperation);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/rename_collection_temp.js b/jstests/replsets/rename_collection_temp.js
index 57d33f8beb5..dc6ffd6f84a 100644
--- a/jstests/replsets/rename_collection_temp.js
+++ b/jstests/replsets/rename_collection_temp.js
@@ -4,84 +4,80 @@
// @tags: [requires_replication]
(function() {
- "use strict";
-
- function checkCollectionTemp(db, collName, expectedTempValue) {
- var collectionInformation = db.getCollectionInfos();
-
- var hasSeenCollection = false;
- for (var i = 0; i < collectionInformation.length; i++) {
- var collection = collectionInformation[i];
-
- if (collection.name === collName) {
- hasSeenCollection = true;
-
- if (expectedTempValue) {
- // We expect this collection to be temporary.
- assert.eq(collection.options.temp, true);
- } else {
- // We expect this collection to be permanent, thus the temp option will not show
- // up.
- assert.isnull(collection.options.temp);
- }
+"use strict";
+
+function checkCollectionTemp(db, collName, expectedTempValue) {
+ var collectionInformation = db.getCollectionInfos();
+
+ var hasSeenCollection = false;
+ for (var i = 0; i < collectionInformation.length; i++) {
+ var collection = collectionInformation[i];
+
+ if (collection.name === collName) {
+ hasSeenCollection = true;
+
+ if (expectedTempValue) {
+ // We expect this collection to be temporary.
+ assert.eq(collection.options.temp, true);
+ } else {
+ // We expect this collection to be permanent, thus the temp option will not show
+ // up.
+ assert.isnull(collection.options.temp);
}
}
}
+}
- var replTest = new ReplSetTest({name: 'renameCollectionTest', nodes: 2});
- var nodes = replTest.startSet();
+var replTest = new ReplSetTest({name: 'renameCollectionTest', nodes: 2});
+var nodes = replTest.startSet();
- replTest.initiate();
+replTest.initiate();
- var master = replTest.getPrimary();
+var master = replTest.getPrimary();
- // Create a temporary collection.
- var dbFoo = master.getDB("foo");
+// Create a temporary collection.
+var dbFoo = master.getDB("foo");
- assert.commandWorked(dbFoo.runCommand({
- applyOps:
- [{op: "c", ns: dbFoo.getName() + ".$cmd", o: {create: "tempColl", temp: true}}]
- }));
- checkCollectionTemp(dbFoo, "tempColl", true);
+assert.commandWorked(dbFoo.runCommand(
+ {applyOps: [{op: "c", ns: dbFoo.getName() + ".$cmd", o: {create: "tempColl", temp: true}}]}));
+checkCollectionTemp(dbFoo, "tempColl", true);
- // Rename the collection.
- assert.commandWorked(
- master.adminCommand({renameCollection: "foo.tempColl", to: "foo.permanentColl"}));
+// Rename the collection.
+assert.commandWorked(
+ master.adminCommand({renameCollection: "foo.tempColl", to: "foo.permanentColl"}));
- // Confirm that it is no longer temporary.
- checkCollectionTemp(dbFoo, "permanentColl", false);
+// Confirm that it is no longer temporary.
+checkCollectionTemp(dbFoo, "permanentColl", false);
- replTest.awaitReplication();
+replTest.awaitReplication();
- var secondary = replTest.getSecondary();
- var secondaryFoo = secondary.getDB("foo");
+var secondary = replTest.getSecondary();
+var secondaryFoo = secondary.getDB("foo");
- secondaryFoo.permanentColl.setSlaveOk(true);
+secondaryFoo.permanentColl.setSlaveOk(true);
- // Get the information on the secondary to ensure it was replicated correctly.
- checkCollectionTemp(secondaryFoo, "permanentColl", false);
+// Get the information on the secondary to ensure it was replicated correctly.
+checkCollectionTemp(secondaryFoo, "permanentColl", false);
- // Check the behavior when the "dropTarget" flag is passed to renameCollection.
- dbFoo.permanentColl.drop();
+// Check the behavior when the "dropTarget" flag is passed to renameCollection.
+dbFoo.permanentColl.drop();
- assert.commandWorked(dbFoo.runCommand({
- applyOps:
- [{op: "c", ns: dbFoo.getName() + ".$cmd", o: {create: "tempColl", temp: true}}]
- }));
- checkCollectionTemp(dbFoo, "tempColl", true);
+assert.commandWorked(dbFoo.runCommand(
+ {applyOps: [{op: "c", ns: dbFoo.getName() + ".$cmd", o: {create: "tempColl", temp: true}}]}));
+checkCollectionTemp(dbFoo, "tempColl", true);
- // Construct an empty collection that will be dropped on rename.
- assert.commandWorked(dbFoo.runCommand({create: "permanentColl"}));
+// Construct an empty collection that will be dropped on rename.
+assert.commandWorked(dbFoo.runCommand({create: "permanentColl"}));
- // Rename, dropping "permanentColl" and replacing it.
- assert.commandWorked(master.adminCommand(
- {renameCollection: "foo.tempColl", to: "foo.permanentColl", dropTarget: true}));
+// Rename, dropping "permanentColl" and replacing it.
+assert.commandWorked(master.adminCommand(
+ {renameCollection: "foo.tempColl", to: "foo.permanentColl", dropTarget: true}));
- checkCollectionTemp(dbFoo, "permanentColl", false);
+checkCollectionTemp(dbFoo, "permanentColl", false);
- replTest.awaitReplication();
+replTest.awaitReplication();
- checkCollectionTemp(secondaryFoo, "permanentColl", false);
+checkCollectionTemp(secondaryFoo, "permanentColl", false);
- replTest.stopSet();
+replTest.stopSet();
}());
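
Creating a temporary collection directly through applyOps, as this test does twice, looks like the following in isolation; "db" is assumed to point at a database on the primary:

assert.commandWorked(db.runCommand({
    applyOps: [{op: "c", ns: db.getName() + ".$cmd", o: {create: "tempColl", temp: true}}]
}));
// The temp flag is visible in the collection options.
assert.eq(db.getCollectionInfos({name: "tempColl"})[0].options.temp, true);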
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
index a0754764db0..81a5d75c560 100644
--- a/jstests/replsets/replset1.js
+++ b/jstests/replsets/replset1.js
@@ -4,7 +4,6 @@ var ssl_name;
load("jstests/replsets/rslib.js");
load('jstests/replsets/libs/election_metrics.js');
var doTest = function(signal) {
-
// Test basic replica set functionality.
// -- Replication
// -- Failover
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index b6013c377c6..8b789db3547 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -1,7 +1,6 @@
load("jstests/replsets/rslib.js");
doTest = function(signal) {
-
// Test replication with write concern.
// Replica set testing API
diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js
index ee12d75a0bb..8723c562601 100644
--- a/jstests/replsets/replset3.js
+++ b/jstests/replsets/replset3.js
@@ -33,7 +33,7 @@ var doTest = function(signal) {
try {
var new_master = replTest.getPrimary();
} catch (err) {
- throw("Could not elect new master before timeout.");
+ throw ("Could not elect new master before timeout.");
}
print(phase++);
diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js
index 1d5920f1b9e..7cf49c2751a 100644
--- a/jstests/replsets/replset4.js
+++ b/jstests/replsets/replset4.js
@@ -1,5 +1,4 @@
doTest = function(signal) {
-
// Test orphaned master steps down
var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js
index 29ffc152c95..e714e034b87 100644
--- a/jstests/replsets/replset5.js
+++ b/jstests/replsets/replset5.js
@@ -2,79 +2,82 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- // Test write concern defaults
- var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
-
- var nodes = replTest.startSet();
-
- // Initiate set with default for write concern
- var config = replTest.getReplSetConfig();
- config.settings = {};
- config.settings.getLastErrorDefaults = {'w': 3, 'wtimeout': ReplSetTest.kDefaultTimeoutMS};
- config.settings.heartbeatTimeoutSecs = 15;
- // Prevent node 2 from becoming primary, as we will attempt to set it to hidden later.
- config.members[2].priority = 0;
-
- replTest.initiate(config);
-
- //
- var master = replTest.getPrimary();
- replTest.awaitSecondaryNodes();
- var testDB = "foo";
-
- // Initial replication
- master.getDB("barDB").bar.save({a: 1});
- replTest.awaitReplication();
-
- // These writes should be replicated immediately
- var docNum = 5000;
- var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp();
- for (var n = 0; n < docNum; n++) {
- bulk.insert({n: n});
- }
-
-    // should use the configured last error defaults from above; that's what we're testing.
- //
- // If you want to test failure, just add values for w and wtimeout (e.g. w=1)
- // to the following command. This will override the default set above and
- // prevent replication from happening in time for the count tests below.
- //
- var result = bulk.execute();
- var wcError = result.getWriteConcernError();
-
- if (wcError != null) {
- print("\WARNING getLastError timed out and should not have: " + result.toString());
- print("This machine seems extremely slow. Stopping test without failing it\n");
- replTest.stopSet();
- return;
- }
+"use strict";
+// Test write concern defaults
+var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
+
+var nodes = replTest.startSet();
+
+// Initiate set with default for write concern
+var config = replTest.getReplSetConfig();
+config.settings = {};
+config.settings.getLastErrorDefaults = {
+ 'w': 3,
+ 'wtimeout': ReplSetTest.kDefaultTimeoutMS
+};
+config.settings.heartbeatTimeoutSecs = 15;
+// Prevent node 2 from becoming primary, as we will attempt to set it to hidden later.
+config.members[2].priority = 0;
+
+replTest.initiate(config);
+
+//
+var master = replTest.getPrimary();
+replTest.awaitSecondaryNodes();
+var testDB = "foo";
+
+// Initial replication
+master.getDB("barDB").bar.save({a: 1});
+replTest.awaitReplication();
+
+// These writes should be replicated immediately
+var docNum = 5000;
+var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp();
+for (var n = 0; n < docNum; n++) {
+ bulk.insert({n: n});
+}
+
+// should use the configured last error defaults from above; that's what we're testing.
+//
+// If you want to test failure, just add values for w and wtimeout (e.g. w=1)
+// to the following command. This will override the default set above and
+// prevent replication from happening in time for the count tests below.
+//
+var result = bulk.execute();
+var wcError = result.getWriteConcernError();
+
+if (wcError != null) {
+ print("\WARNING getLastError timed out and should not have: " + result.toString());
+ print("This machine seems extremely slow. Stopping test without failing it\n");
+ replTest.stopSet();
+ return;
+}
- var slaves = replTest._slaves;
- slaves[0].setSlaveOk();
- slaves[1].setSlaveOk();
+var slaves = replTest._slaves;
+slaves[0].setSlaveOk();
+slaves[1].setSlaveOk();
- var slave0count = slaves[0].getDB(testDB).foo.find().itcount();
- assert(slave0count == docNum, "Slave 0 has " + slave0count + " of " + docNum + " documents!");
+var slave0count = slaves[0].getDB(testDB).foo.find().itcount();
+assert(slave0count == docNum, "Slave 0 has " + slave0count + " of " + docNum + " documents!");
- var slave1count = slaves[1].getDB(testDB).foo.find().itcount();
- assert(slave1count == docNum, "Slave 1 has " + slave1count + " of " + docNum + " documents!");
+var slave1count = slaves[1].getDB(testDB).foo.find().itcount();
+assert(slave1count == docNum, "Slave 1 has " + slave1count + " of " + docNum + " documents!");
- var master1count = master.getDB(testDB).foo.find().itcount();
- assert(master1count == docNum, "Master has " + master1count + " of " + docNum + " documents!");
+var master1count = master.getDB(testDB).foo.find().itcount();
+assert(master1count == docNum, "Master has " + master1count + " of " + docNum + " documents!");
- print("replset5.js reconfigure with hidden=1");
- config = master.getDB("local").system.replset.findOne();
+print("replset5.js reconfigure with hidden=1");
+config = master.getDB("local").system.replset.findOne();
- assert.eq(15, config.settings.heartbeatTimeoutSecs);
+assert.eq(15, config.settings.heartbeatTimeoutSecs);
- config.version++;
- config.members[2].hidden = 1;
+config.version++;
+config.members[2].hidden = 1;
- master = reconfig(replTest, config);
+master = reconfig(replTest, config);
- config = master.getSisterDB("local").system.replset.findOne();
- assert.eq(config.members[2].hidden, true);
+config = master.getSisterDB("local").system.replset.findOne();
+assert.eq(config.members[2].hidden, true);
- replTest.stopSet();
+replTest.stopSet();
}());
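
The getLastErrorDefaults setup at the top of this test is the behavior under test; in isolation it is just a settings tweak applied before initiation. A sketch, with the timeout value illustrative:

var config = replTest.getReplSetConfig();
config.settings = {
    // Writes that carry no explicit write concern inherit w: 3 from here.
    getLastErrorDefaults: {w: 3, wtimeout: 10 * 60 * 1000},
    heartbeatTimeoutSecs: 15
};
replTest.initiate(config);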
diff --git a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js
index 23169a2ff6a..65f1be46e16 100644
--- a/jstests/replsets/replset8.js
+++ b/jstests/replsets/replset8.js
@@ -2,85 +2,85 @@
// test for SERVER-6303 - documents moving backward during an initial sync.
(function() {
- "use strict";
- var rt = new ReplSetTest({name: "replset8", nodes: 1});
+"use strict";
+var rt = new ReplSetTest({name: "replset8", nodes: 1});
- var nodes = rt.startSet();
- rt.initiate();
- var master = rt.getPrimary();
- var bigstring = "a";
- var md = master.getDB('d');
- var mdc = md['c'];
+var nodes = rt.startSet();
+rt.initiate();
+var master = rt.getPrimary();
+var bigstring = "a";
+var md = master.getDB('d');
+var mdc = md['c'];
- // prep the data
+// prep the data
- // idea: create x documents of increasing size, then create x documents of size n.
- // delete first x documents. start initial sync (cloner). update all remaining
- // documents to be increasing size.
- // this should result in the updates moving the docs backwards.
+// idea: create x documents of increasing size, then create x documents of size n.
+// delete first x documents. start initial sync (cloner). update all remaining
+// documents to be increasing size.
+// this should result in the updates moving the docs backwards.
- var doccount = 5000;
- // Avoid empty extent issues
- mdc.insert({_id: -1, x: "dummy"});
+var doccount = 5000;
+// Avoid empty extent issues
+mdc.insert({_id: -1, x: "dummy"});
- jsTestLog('inserting ' + doccount + ' bigstrings');
- var bulk = mdc.initializeUnorderedBulkOp();
- for (var i = 0; i < doccount; ++i) {
- bulk.insert({_id: i, x: bigstring});
- bigstring += "a";
- }
- var result = assert.writeOK(bulk.execute());
- jsTestLog('insert 0-' + (doccount - 1) + ' result: ' + tojson(result));
- assert.eq(doccount, result.nInserted);
- assert.eq(doccount + 1, mdc.find().itcount());
+jsTestLog('inserting ' + doccount + ' bigstrings');
+var bulk = mdc.initializeUnorderedBulkOp();
+for (var i = 0; i < doccount; ++i) {
+ bulk.insert({_id: i, x: bigstring});
+ bigstring += "a";
+}
+var result = assert.writeOK(bulk.execute());
+jsTestLog('insert 0-' + (doccount - 1) + ' result: ' + tojson(result));
+assert.eq(doccount, result.nInserted);
+assert.eq(doccount + 1, mdc.find().itcount());
- jsTestLog('inserting ' + (doccount * 2) + ' documents - {_id: 0, x: 0} ... {_id: ' +
- (doccount * 2 - 1) + ', x: ' + (doccount * 2 - 1) + '}');
- bulk = mdc.initializeUnorderedBulkOp();
- for (i = doccount; i < doccount * 2; ++i) {
- bulk.insert({_id: i, x: i});
- }
- result = assert.writeOK(bulk.execute());
- jsTestLog('insert ' + doccount + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
- assert.eq(doccount, result.nInserted);
- assert.eq(doccount * 2 + 1, mdc.find().itcount());
+jsTestLog('inserting ' + (doccount * 2) + ' documents - {_id: 0, x: 0} ... {_id: ' +
+ (doccount * 2 - 1) + ', x: ' + (doccount * 2 - 1) + '}');
+bulk = mdc.initializeUnorderedBulkOp();
+for (i = doccount; i < doccount * 2; ++i) {
+ bulk.insert({_id: i, x: i});
+}
+result = assert.writeOK(bulk.execute());
+jsTestLog('insert ' + doccount + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
+assert.eq(doccount, result.nInserted);
+assert.eq(doccount * 2 + 1, mdc.find().itcount());
- jsTestLog('deleting ' + doccount + ' bigstrings');
- bulk = mdc.initializeUnorderedBulkOp();
- for (i = 0; i < doccount; ++i) {
- bulk.find({_id: i}).remove();
- }
- result = assert.writeOK(bulk.execute());
- jsTestLog('delete 0-' + (doccount - 1) + ' result: ' + tojson(result));
- assert.eq(doccount, result.nRemoved);
- assert.eq(doccount + 1, mdc.find().itcount());
+jsTestLog('deleting ' + doccount + ' bigstrings');
+bulk = mdc.initializeUnorderedBulkOp();
+for (i = 0; i < doccount; ++i) {
+ bulk.find({_id: i}).remove();
+}
+result = assert.writeOK(bulk.execute());
+jsTestLog('delete 0-' + (doccount - 1) + ' result: ' + tojson(result));
+assert.eq(doccount, result.nRemoved);
+assert.eq(doccount + 1, mdc.find().itcount());
- // add a secondary
- var slave = rt.add();
- rt.reInitiate();
- jsTestLog('reinitiation complete after adding new node to replicaset');
- rt.awaitSecondaryNodes();
- jsTestLog("updating documents backwards");
- // Move all documents to the beginning by growing them to sizes that should
- // fit the holes we made in phase 1
- bulk = mdc.initializeUnorderedBulkOp();
- for (i = doccount * 2; i > doccount; --i) {
- bulk.find({_id: i}).update({$set: {x: bigstring}});
- bigstring = bigstring.slice(0, -1); // remove last char
- }
- result = assert.writeOK(bulk.execute({w: rt.nodes.length}));
- jsTestLog('update ' + (doccount + 1) + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
- assert.eq(doccount - 1, result.nMatched);
- assert.eq(doccount - 1, result.nModified);
+// add a secondary
+var slave = rt.add();
+rt.reInitiate();
+jsTestLog('reinitiation complete after adding new node to replicaset');
+rt.awaitSecondaryNodes();
+jsTestLog("updating documents backwards");
+// Move all documents to the beginning by growing them to sizes that should
+// fit the holes we made in phase 1
+bulk = mdc.initializeUnorderedBulkOp();
+for (i = doccount * 2; i > doccount; --i) {
+ bulk.find({_id: i}).update({$set: {x: bigstring}});
+ bigstring = bigstring.slice(0, -1); // remove last char
+}
+result = assert.writeOK(bulk.execute({w: rt.nodes.length}));
+jsTestLog('update ' + (doccount + 1) + '-' + (doccount * 2 - 1) + ' result: ' + tojson(result));
+assert.eq(doccount - 1, result.nMatched);
+assert.eq(doccount - 1, result.nModified);
- assert.eq(doccount + 1,
- mdc.find().itcount(),
- 'incorrect collection size on primary (fast count: ' + mdc.count() + ')');
- assert.eq(doccount + 1,
- slave.getDB('d')['c'].find().itcount(),
- 'incorrect collection size on secondary (fast count: ' +
- slave.getDB('d')['c'].count() + ')');
+assert.eq(doccount + 1,
+ mdc.find().itcount(),
+ 'incorrect collection size on primary (fast count: ' + mdc.count() + ')');
+assert.eq(
+ doccount + 1,
+ slave.getDB('d')['c'].find().itcount(),
+ 'incorrect collection size on secondary (fast count: ' + slave.getDB('d')['c'].count() + ')');
- jsTestLog("finished");
- rt.stopSet();
+jsTestLog("finished");
+rt.stopSet();
})();
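
The test leans on the unordered bulk API throughout; the core idiom, sketched against an assumed collection handle "coll":

var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; ++i) {
    bulk.insert({_id: i, x: "payload"});
}
// execute() optionally takes a write concern, e.g. bulk.execute({w: 2}).
var result = assert.writeOK(bulk.execute());
assert.eq(1000, result.nInserted);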
diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js
index 8e1712749e4..cda2c371180 100644
--- a/jstests/replsets/replsetarb2.js
+++ b/jstests/replsets/replsetarb2.js
@@ -1,48 +1,48 @@
// Election when master fails and remaining nodes are an arbiter and a slave.
(function() {
- "use strict";
-
- var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3});
- var nodes = replTest.nodeList();
-
- var conns = replTest.startSet();
- var r = replTest.initiate({
- "_id": "unicomplex",
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], "arbiterOnly": true, "votes": 1},
- {"_id": 2, "host": nodes[2]}
- ]
- });
-
- // Make sure we have a master
- var master = replTest.getPrimary();
-
- // Make sure we have an arbiter
- assert.soon(function() {
- var res = conns[1].getDB("admin").runCommand({replSetGetStatus: 1});
- printjson(res);
- return res.myState === 7;
- }, "Aribiter failed to initialize.");
-
- var result = conns[1].getDB("admin").runCommand({isMaster: 1});
- assert(result.arbiterOnly);
- assert(!result.passive);
-
- // Wait for initial replication
- master.getDB("foo").foo.insert({a: "foo"});
- replTest.awaitReplication();
-
- // Now kill the original master
- var mId = replTest.getNodeId(master);
- replTest.stop(mId);
-
- // And make sure that the slave is promoted
- var new_master = replTest.getPrimary();
-
- var newMasterId = replTest.getNodeId(new_master);
- assert.neq(newMasterId, mId, "Secondary wasn't promoted to new primary");
-
- replTest.stopSet(15);
+"use strict";
+
+var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3});
+var nodes = replTest.nodeList();
+
+var conns = replTest.startSet();
+var r = replTest.initiate({
+ "_id": "unicomplex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], "arbiterOnly": true, "votes": 1},
+ {"_id": 2, "host": nodes[2]}
+ ]
+});
+
+// Make sure we have a master
+var master = replTest.getPrimary();
+
+// Make sure we have an arbiter
+assert.soon(function() {
+ var res = conns[1].getDB("admin").runCommand({replSetGetStatus: 1});
+ printjson(res);
+ return res.myState === 7;
+}, "Aribiter failed to initialize.");
+
+var result = conns[1].getDB("admin").runCommand({isMaster: 1});
+assert(result.arbiterOnly);
+assert(!result.passive);
+
+// Wait for initial replication
+master.getDB("foo").foo.insert({a: "foo"});
+replTest.awaitReplication();
+
+// Now kill the original master
+var mId = replTest.getNodeId(master);
+replTest.stop(mId);
+
+// And make sure that the slave is promoted
+var new_master = replTest.getPrimary();
+
+var newMasterId = replTest.getNodeId(new_master);
+assert.neq(newMasterId, mId, "Secondary wasn't promoted to new primary");
+
+replTest.stopSet(15);
}());
diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js
index dc2d5295e4e..bb1c1f7dc76 100644
--- a/jstests/replsets/replsetprio1.js
+++ b/jstests/replsets/replsetprio1.js
@@ -1,55 +1,55 @@
// should check that election happens in priority order
(function() {
- "use strict";
- var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
- var nodenames = replTest.nodeList();
-
- var nodes = replTest.startSet();
- replTest.initiateWithAnyNodeAsPrimary({
- "_id": "testSet",
- "members": [
- {"_id": 0, "host": nodenames[0], "priority": 1},
- {"_id": 1, "host": nodenames[1], "priority": 2},
- {"_id": 2, "host": nodenames[2], "priority": 3}
- ]
- });
-
- // 2 should be master (give this a while to happen, as other nodes might first be elected)
- replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 2);
-
- // wait for 1 to not appear to be master (we are about to make it master and need a clean slate
- // here)
- replTest.waitForState(nodes[1], ReplSetTest.State.SECONDARY);
-
- // Wait for election oplog entry to be replicated, to ensure 0 will vote for 1 after stopping 2.
- replTest.awaitReplication();
-
- // kill 2, 1 should take over
- replTest.stop(2);
-
- // 1 should eventually be master
- replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
-
- // do some writes on 1
- var master = replTest.getPrimary();
- for (var i = 0; i < 1000; i++) {
- assert.writeOK(master.getDB("foo").bar.insert({i: i}, {writeConcern: {w: 'majority'}}));
- }
-
- for (i = 0; i < 1000; i++) {
- assert.writeOK(master.getDB("bar").baz.insert({i: i}, {writeConcern: {w: 'majority'}}));
- }
-
- // bring 2 back up, 2 should wait until caught up and then become master
- replTest.restart(2);
- replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 2);
-
- // make sure nothing was rolled back
- master = replTest.getPrimary();
- for (i = 0; i < 1000; i++) {
- assert(master.getDB("foo").bar.findOne({i: i}) != null, 'checking ' + i);
- assert(master.getDB("bar").baz.findOne({i: i}) != null, 'checking ' + i);
- }
- replTest.stopSet();
+"use strict";
+var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
+var nodenames = replTest.nodeList();
+
+var nodes = replTest.startSet();
+replTest.initiateWithAnyNodeAsPrimary({
+ "_id": "testSet",
+ "members": [
+ {"_id": 0, "host": nodenames[0], "priority": 1},
+ {"_id": 1, "host": nodenames[1], "priority": 2},
+ {"_id": 2, "host": nodenames[2], "priority": 3}
+ ]
+});
+
+// 2 should be master (give this a while to happen, as other nodes might first be elected)
+replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 2);
+
+// wait for 1 to not appear to be master (we are about to make it master and need a clean slate
+// here)
+replTest.waitForState(nodes[1], ReplSetTest.State.SECONDARY);
+
+// Wait for election oplog entry to be replicated, to ensure 0 will vote for 1 after stopping 2.
+replTest.awaitReplication();
+
+// kill 2, 1 should take over
+replTest.stop(2);
+
+// 1 should eventually be master
+replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY);
+
+// do some writes on 1
+var master = replTest.getPrimary();
+for (var i = 0; i < 1000; i++) {
+ assert.writeOK(master.getDB("foo").bar.insert({i: i}, {writeConcern: {w: 'majority'}}));
+}
+
+for (i = 0; i < 1000; i++) {
+ assert.writeOK(master.getDB("bar").baz.insert({i: i}, {writeConcern: {w: 'majority'}}));
+}
+
+// bring 2 back up, 2 should wait until caught up and then become master
+replTest.restart(2);
+replTest.awaitNodesAgreeOnPrimary(replTest.kDefaultTimeoutMS, nodes, 2);
+
+// make sure nothing was rolled back
+master = replTest.getPrimary();
+for (i = 0; i < 1000; i++) {
+ assert(master.getDB("foo").bar.findOne({i: i}) != null, 'checking ' + i);
+ assert(master.getDB("bar").baz.findOne({i: i}) != null, 'checking ' + i);
+}
+replTest.stopSet();
}());
diff --git a/jstests/replsets/replsetrestart1.js b/jstests/replsets/replsetrestart1.js
index 01ab2c702e4..e090a1ff959 100644
--- a/jstests/replsets/replsetrestart1.js
+++ b/jstests/replsets/replsetrestart1.js
@@ -9,62 +9,62 @@
// @tags: [requires_persistence]
(function() {
- 'use strict';
+'use strict';
- var compare_configs = function(c1, c2) {
- assert.eq(c1.version, c2.version, 'version same');
- assert.eq(c1._id, c2._id, '_id same');
+var compare_configs = function(c1, c2) {
+ assert.eq(c1.version, c2.version, 'version same');
+ assert.eq(c1._id, c2._id, '_id same');
- for (var i in c1.members) {
- assert(c2.members[i] !== undefined, 'field ' + i + ' exists in both configs');
- assert.eq(c1.members[i]._id, c2.members[i]._id, 'id is equal in both configs');
- assert.eq(c1.members[i].host, c2.members[i].host, 'host is equal in both configs');
- }
- };
+ for (var i in c1.members) {
+ assert(c2.members[i] !== undefined, 'field ' + i + ' exists in both configs');
+ assert.eq(c1.members[i]._id, c2.members[i]._id, 'id is equal in both configs');
+ assert.eq(c1.members[i].host, c2.members[i].host, 'host is equal in both configs');
+ }
+};
- // Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
+// Create a new replica set test. Specify set name and the number of nodes you want.
+var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
- // call startSet() to start each mongod in the replica set
- // this returns a list of nodes
- replTest.startSet();
+// call startSet() to start each mongod in the replica set
+// this returns a list of nodes
+replTest.startSet();
- // Call initiate() to send the replSetInitiate command
- // This will wait for initiation
- replTest.initiate();
+// Call initiate() to send the replSetInitiate command
+// This will wait for initiation
+replTest.initiate();
- // Wait for at least one heartbeat to reach everyone, so that we will properly mark nodes as
- // DOWN, later.
- replTest.awaitSecondaryNodes();
+// Wait for at least one heartbeat to reach everyone, so that we will properly mark nodes as
+// DOWN, later.
+replTest.awaitSecondaryNodes();
- // Call getPrimary to return a reference to the node that's been
- // elected master.
- var master = replTest.getPrimary();
- var config1 = master.getDB("local").system.replset.findOne();
+// Call getPrimary to return a reference to the node that's been
+// elected master.
+var master = replTest.getPrimary();
+var config1 = master.getDB("local").system.replset.findOne();
- // Now we're going to shut down all nodes
- var mId = replTest.getNodeId(master);
- var s1 = replTest._slaves[0];
- var s1Id = replTest.getNodeId(s1);
- var s2 = replTest._slaves[1];
- var s2Id = replTest.getNodeId(s2);
+// Now we're going to shut down all nodes
+var mId = replTest.getNodeId(master);
+var s1 = replTest._slaves[0];
+var s1Id = replTest.getNodeId(s1);
+var s2 = replTest._slaves[1];
+var s2Id = replTest.getNodeId(s2);
- replTest.stop(s1Id);
- replTest.stop(s2Id);
- replTest.waitForState(s1, ReplSetTest.State.DOWN);
- replTest.waitForState(s2, ReplSetTest.State.DOWN);
+replTest.stop(s1Id);
+replTest.stop(s2Id);
+replTest.waitForState(s1, ReplSetTest.State.DOWN);
+replTest.waitForState(s2, ReplSetTest.State.DOWN);
- replTest.stop(mId);
+replTest.stop(mId);
- // Now let's restart these nodes
- replTest.restart(mId);
- replTest.restart(s1Id);
- replTest.restart(s2Id);
+// Now let's restart these nodes
+replTest.restart(mId);
+replTest.restart(s1Id);
+replTest.restart(s2Id);
- // Make sure that a new master comes up
- master = replTest.getPrimary();
- replTest.awaitSecondaryNodes();
- var config2 = master.getDB("local").system.replset.findOne();
- compare_configs(config1, config2);
- replTest.stopSet();
+// Make sure that a new master comes up
+master = replTest.getPrimary();
+replTest.awaitSecondaryNodes();
+var config2 = master.getDB("local").system.replset.findOne();
+compare_configs(config1, config2);
+replTest.stopSet();
}());
diff --git a/jstests/replsets/replsets_killop.js b/jstests/replsets/replsets_killop.js
index 4e41046f8c8..3fb42d6a244 100644
--- a/jstests/replsets/replsets_killop.js
+++ b/jstests/replsets/replsets_killop.js
@@ -17,9 +17,11 @@ assert.soon(function() {
});
// Start a parallel shell to insert new documents on the primary.
-inserter = startParallelShell('var bulk = db.test.initializeUnorderedBulkOp(); \
- for( i = 1; i < ' + numDocs +
- '; ++i ) { \
+inserter = startParallelShell(
+ 'var bulk = db.test.initializeUnorderedBulkOp(); \
+ for( i = 1; i < ' +
+ numDocs +
+ '; ++i ) { \
bulk.insert({ a: i }); \
} \
bulk.execute();');
diff --git a/jstests/replsets/request_primary_stepdown.js b/jstests/replsets/request_primary_stepdown.js
index 8ea0f78688c..6f4a37b1a22 100644
--- a/jstests/replsets/request_primary_stepdown.js
+++ b/jstests/replsets/request_primary_stepdown.js
@@ -4,36 +4,35 @@
// Eventually the high priority node will run a priority takeover election to become primary. During
// this election that node should make sure that it does not error in _requestRemotePrimaryStepDown.
(function() {
- 'use strict';
- load('jstests/replsets/rslib.js');
+'use strict';
+load('jstests/replsets/rslib.js');
- var name = 'request_primary_stepdown';
- var replSet = new ReplSetTest(
- {name: name, nodes: [{rsConfig: {priority: 3}}, {}, {rsConfig: {arbiterOnly: true}}]});
- replSet.startSet();
- replSet.initiate();
+var name = 'request_primary_stepdown';
+var replSet = new ReplSetTest(
+ {name: name, nodes: [{rsConfig: {priority: 3}}, {}, {rsConfig: {arbiterOnly: true}}]});
+replSet.startSet();
+replSet.initiate();
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- replSet.awaitSecondaryNodes();
- replSet.awaitReplication();
- var primary = replSet.getPrimary();
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+replSet.awaitSecondaryNodes();
+replSet.awaitReplication();
+var primary = replSet.getPrimary();
- assert.commandWorked(
- replSet.nodes[0].adminCommand({setParameter: 1, logComponentVerbosity: {executor: 4}}));
- clearRawMongoProgramOutput();
+assert.commandWorked(
+ replSet.nodes[0].adminCommand({setParameter: 1, logComponentVerbosity: {executor: 4}}));
+clearRawMongoProgramOutput();
- // Primary should step down long enough for election to occur on secondary.
- assert.commandWorked(
- primary.adminCommand({replSetStepDown: 70, secondaryCatchUpPeriodSecs: 60}));
+// Primary should step down long enough for election to occur on secondary.
+assert.commandWorked(primary.adminCommand({replSetStepDown: 70, secondaryCatchUpPeriodSecs: 60}));
- // Wait for node 1 to be promoted to primary after node 0 stepped down.
- replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY, 60 * 1000);
+// Wait for node 1 to be promoted to primary after node 0 stepped down.
+replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY, 60 * 1000);
-    // Eventually node 0 will stand for election again because it has a higher priority.
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 100 * 1000);
- var logContents = rawMongoProgramOutput();
- assert.eq(logContents.indexOf("stepdown period must be longer than secondaryCatchUpPeriodSecs"),
- -1,
- "_requestRemotePrimaryStepDown sent an invalid replSetStepDown command");
- replSet.stopSet();
+// Eventually node 0 will stand for election again because it has a higher priority.
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 100 * 1000);
+var logContents = rawMongoProgramOutput();
+assert.eq(logContents.indexOf("stepdown period must be longer than secondaryCatchUpPeriodSecs"),
+ -1,
+ "_requestRemotePrimaryStepDown sent an invalid replSetStepDown command");
+replSet.stopSet();
})();
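
The parameter relationship matters here: replSetStepDown is rejected when the stepdown window is not longer than secondaryCatchUpPeriodSecs, which is exactly the message the test asserts is absent from the logs. A sketch of a valid call, assuming "primary" is the current primary:

// 70 > 60, so the command is accepted; swapping the two values would fail
// with "stepdown period must be longer than secondaryCatchUpPeriodSecs".
assert.commandWorked(
    primary.adminCommand({replSetStepDown: 70, secondaryCatchUpPeriodSecs: 60}));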
diff --git a/jstests/replsets/restore_term.js b/jstests/replsets/restore_term.js
index 05c03a2dfee..072a22eb974 100644
--- a/jstests/replsets/restore_term.js
+++ b/jstests/replsets/restore_term.js
@@ -10,52 +10,52 @@ load("jstests/replsets/rslib.js");
// storage engines.
// @tags: [requires_persistence]
(function() {
- "use strict";
-
- function getCurrentTerm(primary) {
- var res = primary.adminCommand({replSetGetStatus: 1});
- assert.commandWorked(res);
- return res.term;
- }
-
- var name = "restore_term";
- var rst = new ReplSetTest({name: name, nodes: 2});
-
- rst.startSet();
- rst.initiate();
- rst.awaitSecondaryNodes();
-
- var primary = rst.getPrimary();
- var primaryColl = primary.getDB("test").coll;
-
-    // Current term may be greater than 1 if an election race happens.
- var firstSuccessfulTerm = getCurrentTerm(primary);
- assert.gte(firstSuccessfulTerm, 1);
- assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}}));
- assert.eq(getCurrentTerm(primary), firstSuccessfulTerm);
-
- // Check that the insert op has the initial term.
- var latestOp = getLatestOp(primary);
- assert.eq(latestOp.op, "i");
- assert.eq(latestOp.t, firstSuccessfulTerm);
-
- // Step down to increase the term.
- assert.commandWorked(primary.adminCommand({replSetStepDown: 0}));
-
- rst.awaitSecondaryNodes();
-    // The secondary has now become the new primary, with a higher term.
-    // Since there's only one secondary that may run for election, the new term is higher by 1.
- assert.eq(getCurrentTerm(rst.getPrimary()), firstSuccessfulTerm + 1);
-
- // Restart the replset and verify the term is the same.
- rst.stopSet(null /* signal */, true /* forRestart */);
- rst.startSet({restart: true});
- rst.awaitSecondaryNodes();
- primary = rst.getPrimary();
-
- assert.eq(primary.getDB("test").coll.find().itcount(), 1);
- // After restart, the new primary stands up with the newer term.
- assert.gte(getCurrentTerm(primary), firstSuccessfulTerm + 1);
-
- rst.stopSet();
+"use strict";
+
+function getCurrentTerm(primary) {
+ var res = primary.adminCommand({replSetGetStatus: 1});
+ assert.commandWorked(res);
+ return res.term;
+}
+
+var name = "restore_term";
+var rst = new ReplSetTest({name: name, nodes: 2});
+
+rst.startSet();
+rst.initiate();
+rst.awaitSecondaryNodes();
+
+var primary = rst.getPrimary();
+var primaryColl = primary.getDB("test").coll;
+
+// Current term may be greater than 1 if an election race happens.
+var firstSuccessfulTerm = getCurrentTerm(primary);
+assert.gte(firstSuccessfulTerm, 1);
+assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}}));
+assert.eq(getCurrentTerm(primary), firstSuccessfulTerm);
+
+// Check that the insert op has the initial term.
+var latestOp = getLatestOp(primary);
+assert.eq(latestOp.op, "i");
+assert.eq(latestOp.t, firstSuccessfulTerm);
+
+// Step down to increase the term.
+assert.commandWorked(primary.adminCommand({replSetStepDown: 0}));
+
+rst.awaitSecondaryNodes();
+// The secondary has now become the new primary, with a higher term.
+// Since there's only one secondary that may run for election, the new term is higher by 1.
+assert.eq(getCurrentTerm(rst.getPrimary()), firstSuccessfulTerm + 1);
+
+// Restart the replset and verify the term is the same.
+rst.stopSet(null /* signal */, true /* forRestart */);
+rst.startSet({restart: true});
+rst.awaitSecondaryNodes();
+primary = rst.getPrimary();
+
+assert.eq(primary.getDB("test").coll.find().itcount(), 1);
+// After restart, the new primary stands up with the newer term.
+assert.gte(getCurrentTerm(primary), firstSuccessfulTerm + 1);
+
+rst.stopSet();
})();
diff --git a/jstests/replsets/retryable_commit_transaction_after_failover.js b/jstests/replsets/retryable_commit_transaction_after_failover.js
index 30fd5af2904..e9c81e6b81c 100644
--- a/jstests/replsets/retryable_commit_transaction_after_failover.js
+++ b/jstests/replsets/retryable_commit_transaction_after_failover.js
@@ -1,109 +1,112 @@
// Test committed transaction state is restored after failover.
// @tags: [uses_transactions]
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const dbName = "test";
- const collName = "retryable_commit_transaction_after_failover";
+const dbName = "test";
+const collName = "retryable_commit_transaction_after_failover";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- const config = rst.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while
- // stepping up the old secondary.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+const config = rst.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while
+// stepping up the old secondary.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- // Get the connection to the replica set using MongoDB URI.
- const conn = new Mongo(rst.getURL());
- const testDB = conn.getDB(dbName);
- const testColl = testDB[collName];
+// Get the connection to the replica set using MongoDB URI.
+const conn = new Mongo(rst.getURL());
+const testDB = conn.getDB(dbName);
+const testColl = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
- let txnNumber = 0;
- let stmtId = 0;
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+let txnNumber = 0;
+let stmtId = 0;
- const sessionOptions = {causalConsistency: false};
- let session = testDB.getMongo().startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = testDB.getMongo().startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
- jsTest.log("commitTransaction command is retryable before failover");
- txnNumber++;
- stmtId = 0;
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-1"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- startTransaction: true,
- autocommit: false
- }));
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+jsTest.log("commitTransaction command is retryable before failover");
+txnNumber++;
+stmtId = 0;
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-1"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ startTransaction: true,
+ autocommit: false
+}));
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- // Retry commitTransaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+// Retry commitTransaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- jsTest.log("Step up the secondary");
- const oldPrimary = rst.getPrimary();
- const oldSecondary = rst.getSecondary();
- rst.stepUp(oldSecondary);
- // Wait until the other node becomes primary.
- assert.eq(oldSecondary, rst.getPrimary());
- // Reconnect the connection to the new primary.
- sessionDb.getMongo()._markNodeAsFailed(
- oldPrimary.host, ErrorCodes.NotMaster, "Notice that primary is not master");
- reconnect(sessionDb);
+jsTest.log("Step up the secondary");
+const oldPrimary = rst.getPrimary();
+const oldSecondary = rst.getSecondary();
+rst.stepUp(oldSecondary);
+// Wait until the other node becomes primary.
+assert.eq(oldSecondary, rst.getPrimary());
+// Reconnect the connection to the new primary.
+sessionDb.getMongo()._markNodeAsFailed(
+ oldPrimary.host, ErrorCodes.NotMaster, "Notice that primary is not master");
+reconnect(sessionDb);
- jsTest.log("commitTransaction command is retryable after failover");
- // Retry commitTransaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+jsTest.log("commitTransaction command is retryable after failover");
+// Retry commitTransaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- jsTest.log("Attempt to abort a committed transaction after failover");
- // Cannot abort the committed transaction.
- assert.commandFailedWithCode(sessionDb.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.TransactionCommitted);
+jsTest.log("Attempt to abort a committed transaction after failover");
+// Cannot abort the committed transaction.
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.TransactionCommitted);
- jsTest.log("Attempt to continue a committed transaction after failover");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-2"}],
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.TransactionCommitted);
+jsTest.log("Attempt to continue a committed transaction after failover");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-2"}],
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.TransactionCommitted);
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
}());
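
The commit retries above all reuse the identical txnNumber and stmtId, which is precisely what makes commitTransaction retryable; a condensed sketch of that shape (the helper name retryCommit is hypothetical):

    // Re-sending commitTransaction with the same txnNumber/stmtId is treated
    // by the server as a retry of the same statement, not as a new commit.
    function retryCommit(sessionDb, txnNumber, stmtId) {
        return sessionDb.adminCommand({
            commitTransaction: 1,
            txnNumber: NumberLong(txnNumber),
            stmtId: NumberInt(stmtId),
            autocommit: false,
            writeConcern: {w: "majority"}
        });
    }
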
diff --git a/jstests/replsets/retryable_commit_transaction_after_restart.js b/jstests/replsets/retryable_commit_transaction_after_restart.js
index 802259661fb..2244525c977 100644
--- a/jstests/replsets/retryable_commit_transaction_after_restart.js
+++ b/jstests/replsets/retryable_commit_transaction_after_restart.js
@@ -1,96 +1,97 @@
// Test committed transaction state is restored after restart.
// @tags: [uses_transactions, requires_persistence]
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const dbName = "test";
- const collName = "retryable_commit_transaction_after_restart";
+const dbName = "test";
+const collName = "retryable_commit_transaction_after_restart";
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const testDB = rst.getPrimary().getDB(dbName);
- const testColl = testDB[collName];
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const testDB = rst.getPrimary().getDB(dbName);
+const testColl = testDB[collName];
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- assert.commandWorked(
- testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
- let txnNumber = 0;
- let stmtId = 0;
+assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}}));
+let txnNumber = 0;
+let stmtId = 0;
- const sessionOptions = {causalConsistency: false};
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = testDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
- jsTest.log("commitTransaction command is retryable before restart");
- txnNumber++;
- stmtId = 0;
- assert.commandWorked(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-1"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- startTransaction: true,
- autocommit: false
- }));
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+jsTest.log("commitTransaction command is retryable before restart");
+txnNumber++;
+stmtId = 0;
+assert.commandWorked(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-1"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ startTransaction: true,
+ autocommit: false
+}));
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- // Retry commitTransaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+// Retry commitTransaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- jsTest.log("restart the single node replset");
- rst.restart(0);
- // Wait until the node becomes a primary and reconnect.
- rst.getPrimary();
- reconnect(sessionDb);
+jsTest.log("restart the single node replset");
+rst.restart(0);
+// Wait until the node becomes primary again, then reconnect.
+rst.getPrimary();
+reconnect(sessionDb);
- jsTest.log("commitTransaction command is retryable after restart");
- // Retry commitTransaction.
- assert.commandWorked(sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId),
- autocommit: false,
- writeConcern: {w: "majority"}
- }));
+jsTest.log("commitTransaction command is retryable after restart");
+// Retry commitTransaction.
+assert.commandWorked(sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}));
- jsTest.log("Attempt to abort a committed transaction after restart");
- // Cannot abort the committed transaction.
- assert.commandFailedWithCode(sessionDb.adminCommand({
- abortTransaction: 1,
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false,
- writeConcern: {w: "majority"}
- }),
- ErrorCodes.TransactionCommitted);
+jsTest.log("Attempt to abort a committed transaction after restart");
+// Cannot abort the committed transaction.
+assert.commandFailedWithCode(sessionDb.adminCommand({
+ abortTransaction: 1,
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+}),
+ ErrorCodes.TransactionCommitted);
- jsTest.log("Attempt to continue a committed transaction after restart");
- assert.commandFailedWithCode(sessionDb.runCommand({
- insert: collName,
- documents: [{_id: "commit-txn-2"}],
- txnNumber: NumberLong(txnNumber),
- stmtId: NumberInt(stmtId++),
- autocommit: false
- }),
- ErrorCodes.TransactionCommitted);
+jsTest.log("Attempt to continue a committed transaction after restart");
+assert.commandFailedWithCode(sessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "commit-txn-2"}],
+ txnNumber: NumberLong(txnNumber),
+ stmtId: NumberInt(stmtId++),
+ autocommit: false
+}),
+ ErrorCodes.TransactionCommitted);
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
}());
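
reconnect() above comes from rslib.js; a hypothetical equivalent, shown only to make the retry-until-reachable idea concrete (waitUntilReachable is not the library implementation):

    // Keep issuing a cheap command until the restarted node accepts
    // connections again; assert.soon retries until the body returns true.
    function waitUntilReachable(db) {
        assert.soon(function() {
            try {
                db.adminCommand({ping: 1});
                return true;
            } catch (e) {
                return false;
            }
        });
    }
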
diff --git a/jstests/replsets/retryable_prepared_commit_transaction_after_failover.js b/jstests/replsets/retryable_prepared_commit_transaction_after_failover.js
index fbd05eadffb..ba3a15b83aa 100644
--- a/jstests/replsets/retryable_prepared_commit_transaction_after_failover.js
+++ b/jstests/replsets/retryable_prepared_commit_transaction_after_failover.js
@@ -5,98 +5,100 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collName = "foo";
+const dbName = "test";
+const collName = "foo";
- const rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+const rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- const config = rst.getReplSetConfig();
- // Increase the election timeout so that we do not accidentally trigger an election while
- // stepping up the old secondary.
- config.settings = {"electionTimeoutMillis": 12 * 60 * 60 * 1000};
- rst.initiate(config);
+const config = rst.getReplSetConfig();
+// Increase the election timeout so that we do not accidentally trigger an election while
+// stepping up the old secondary.
+config.settings = {
+ "electionTimeoutMillis": 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- const priConn = rst.getPrimary();
- const secConn = rst.getSecondary();
- assert.commandWorked(priConn.getDB(dbName).runCommand({create: collName}));
+const priConn = rst.getPrimary();
+const secConn = rst.getSecondary();
+assert.commandWorked(priConn.getDB(dbName).runCommand({create: collName}));
- const priSession = priConn.startSession();
- const priSessionDB = priSession.getDatabase(dbName);
- const priSessionColl = priSessionDB.getCollection(collName);
+const priSession = priConn.startSession();
+const priSessionDB = priSession.getDatabase(dbName);
+const priSessionColl = priSessionDB.getCollection(collName);
- jsTestLog("Prepare a transaction");
- priSession.startTransaction();
- assert.commandWorked(priSessionColl.insert({_id: 1}));
- const prepareTimestamp1 = PrepareHelpers.prepareTransaction(priSession);
+jsTestLog("Prepare a transaction");
+priSession.startTransaction();
+assert.commandWorked(priSessionColl.insert({_id: 1}));
+const prepareTimestamp1 = PrepareHelpers.prepareTransaction(priSession);
- jsTestLog("Error committing the transaction");
- // This will error in the "commit unprepared transaction" code path.
- assert.commandFailedWithCode(priSessionDB.adminCommand({commitTransaction: 1}),
- ErrorCodes.InvalidOptions);
+jsTestLog("Error committing the transaction");
+// This will error in the "commit unprepared transaction" code path.
+assert.commandFailedWithCode(priSessionDB.adminCommand({commitTransaction: 1}),
+ ErrorCodes.InvalidOptions);
- // This will error in the "commit prepared transaction" code path.
- const tooEarlyTS1 = Timestamp(prepareTimestamp1.getTime() - 1, 1);
- assert.commandFailedWithCode(
- priSessionDB.adminCommand({commitTransaction: 1, commitTimestamp: tooEarlyTS1}),
- ErrorCodes.InvalidOptions);
+// This will error in the "commit prepared transaction" code path.
+const tooEarlyTS1 = Timestamp(prepareTimestamp1.getTime() - 1, 1);
+assert.commandFailedWithCode(
+ priSessionDB.adminCommand({commitTransaction: 1, commitTimestamp: tooEarlyTS1}),
+ ErrorCodes.InvalidOptions);
- jsTestLog("Step up the secondary");
- rst.stepUp(secConn);
- assert.eq(secConn, rst.getPrimary());
- rst.waitForState(priConn, ReplSetTest.State.SECONDARY);
+jsTestLog("Step up the secondary");
+rst.stepUp(secConn);
+assert.eq(secConn, rst.getPrimary());
+rst.waitForState(priConn, ReplSetTest.State.SECONDARY);
- jsTestLog("commitTransaction command is retryable after failover");
+jsTestLog("commitTransaction command is retryable after failover");
- const secSession = new _DelegatingDriverSession(secConn, priSession);
- const secSessionDB = secSession.getDatabase(dbName);
- const secSessionColl = secSessionDB.getCollection(collName);
- assert.commandWorked(PrepareHelpers.commitTransaction(secSession, prepareTimestamp1));
+const secSession = new _DelegatingDriverSession(secConn, priSession);
+const secSessionDB = secSession.getDatabase(dbName);
+const secSessionColl = secSessionDB.getCollection(collName);
+assert.commandWorked(PrepareHelpers.commitTransaction(secSession, prepareTimestamp1));
- assert.eq(secConn.getDB(dbName)[collName].count(), 1);
- assert.eq(secConn.getDB(dbName)[collName].find().itcount(), 1);
+assert.eq(secConn.getDB(dbName)[collName].count(), 1);
+assert.eq(secConn.getDB(dbName)[collName].find().itcount(), 1);
- rst.awaitReplication();
+rst.awaitReplication();
- assert.eq(priConn.getDB(dbName)[collName].count(), 1);
- assert.eq(priConn.getDB(dbName)[collName].find().itcount(), 1);
+assert.eq(priConn.getDB(dbName)[collName].count(), 1);
+assert.eq(priConn.getDB(dbName)[collName].find().itcount(), 1);
- jsTestLog("Prepare a second transaction");
- secSession.startTransaction();
- assert.commandWorked(secSessionColl.insert({_id: 2}));
- const prepareTimestamp2 = PrepareHelpers.prepareTransaction(secSession);
+jsTestLog("Prepare a second transaction");
+secSession.startTransaction();
+assert.commandWorked(secSessionColl.insert({_id: 2}));
+const prepareTimestamp2 = PrepareHelpers.prepareTransaction(secSession);
- jsTestLog("Error committing the transaction");
- assert.commandFailedWithCode(secSessionDB.adminCommand({commitTransaction: 1}),
- ErrorCodes.InvalidOptions);
- const tooEarlyTS2 = Timestamp(prepareTimestamp2.getTime() - 1, 1);
- assert.commandFailedWithCode(
- secSessionDB.adminCommand({commitTransaction: 1, commitTimestamp: tooEarlyTS2}),
- ErrorCodes.InvalidOptions);
+jsTestLog("Error committing the transaction");
+assert.commandFailedWithCode(secSessionDB.adminCommand({commitTransaction: 1}),
+ ErrorCodes.InvalidOptions);
+const tooEarlyTS2 = Timestamp(prepareTimestamp2.getTime() - 1, 1);
+assert.commandFailedWithCode(
+ secSessionDB.adminCommand({commitTransaction: 1, commitTimestamp: tooEarlyTS2}),
+ ErrorCodes.InvalidOptions);
- jsTestLog("Step up the original primary");
- rst.stepUp(priConn);
- assert.eq(priConn, rst.getPrimary());
- rst.waitForState(secConn, ReplSetTest.State.SECONDARY);
+jsTestLog("Step up the original primary");
+rst.stepUp(priConn);
+assert.eq(priConn, rst.getPrimary());
+rst.waitForState(secConn, ReplSetTest.State.SECONDARY);
- jsTestLog("Step up the original secondary immediately");
- rst.stepUp(secConn);
- assert.eq(secConn, rst.getPrimary());
- rst.waitForState(priConn, ReplSetTest.State.SECONDARY);
+jsTestLog("Step up the original secondary immediately");
+rst.stepUp(secConn);
+assert.eq(secConn, rst.getPrimary());
+rst.waitForState(priConn, ReplSetTest.State.SECONDARY);
- assert.commandWorked(PrepareHelpers.commitTransaction(secSession, prepareTimestamp2));
+assert.commandWorked(PrepareHelpers.commitTransaction(secSession, prepareTimestamp2));
- assert.eq(secConn.getDB(dbName)[collName].count(), 2);
- assert.eq(secConn.getDB(dbName)[collName].find().itcount(), 2);
+assert.eq(secConn.getDB(dbName)[collName].count(), 2);
+assert.eq(secConn.getDB(dbName)[collName].find().itcount(), 2);
- rst.awaitReplication();
+rst.awaitReplication();
- assert.eq(priConn.getDB(dbName)[collName].count(), 2);
- assert.eq(priConn.getDB(dbName)[collName].find().itcount(), 2);
+assert.eq(priConn.getDB(dbName)[collName].count(), 2);
+assert.eq(priConn.getDB(dbName)[collName].find().itcount(), 2);
- rst.stopSet();
+rst.stopSet();
}());
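
The prepare/commit handshake this file exercises, condensed; the sketch reuses PrepareHelpers (loaded above) and assumes `session` holds a transaction that has already performed its writes:

    // prepareTransaction returns the prepare timestamp; the commit timestamp
    // supplied later must be >= it, which is why the "tooEarly" timestamps
    // above are rejected with InvalidOptions.
    const prepareTs = PrepareHelpers.prepareTransaction(session);
    assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTs));
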
diff --git a/jstests/replsets/retryable_write_concern.js b/jstests/replsets/retryable_write_concern.js
index 65f5d4ccad7..376d966f193 100644
--- a/jstests/replsets/retryable_write_concern.js
+++ b/jstests/replsets/retryable_write_concern.js
@@ -5,236 +5,235 @@
*/
(function() {
- "use strict";
-
- load("jstests/libs/retryable_writes_util.js");
- load("jstests/libs/write_concern_util.js");
- load("jstests/libs/feature_compatibility_version.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- const kNodes = 2;
-
- let replTest = new ReplSetTest({nodes: kNodes});
- replTest.startSet({verbose: 1});
- replTest.initiate();
-
- let priConn = replTest.getPrimary();
- let secConn = replTest.getSecondary();
-
- // Stopping replication on secondaries can take up to 5 seconds normally. Set a small oplog
- // getMore timeout so the test runs faster.
- assert.commandWorked(secConn.adminCommand(
- {configureFailPoint: 'setSmallOplogGetMoreMaxTimeMS', mode: 'alwaysOn'}));
-
- let lsid = UUID();
-
- // Start at an arbitrary txnNumber.
- let txnNumber = 31;
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- insert: 'user',
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes);
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- update: 'user',
- updates: [
- {q: {_id: 10}, u: {$inc: {x: 1}}},
- ],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes);
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- delete: 'user',
- deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes);
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- findAndModify: 'user',
- query: {_id: 60},
- update: {$inc: {x: 1}},
- new: true,
- upsert: true,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes);
-
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- setFeatureCompatibilityVersion: lastStableFCV,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin');
- assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
- checkFCV(priConn.getDB('admin'), lastStableFCV);
-
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- setFeatureCompatibilityVersion: latestFCV,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin');
- assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
- checkFCV(priConn.getDB('admin'), latestFCV);
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- commitTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin',
- function(conn) {
- assert.commandWorked(conn.getDB('test').runCommand({
- insert: 'user',
- documents: [{_id: 80}, {_id: 90}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- readConcern: {level: 'snapshot'},
- autocommit: false,
- startTransaction: true
- }));
-
- });
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- prepareTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin',
- function(conn) {
- assert.commandWorked(conn.getDB('test').runCommand({
- insert: 'user',
- documents: [{_id: 100}, {_id: 110}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- readConcern: {level: 'snapshot'},
- autocommit: false,
- startTransaction: true
- }));
- });
- assert.commandWorked(priConn.adminCommand({
- abortTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority'},
- }));
-
- txnNumber++;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- abortTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin',
- function(conn) {
- assert.commandWorked(conn.getDB('test').runCommand({
- insert: 'user',
- documents: [{_id: 120}, {_id: 130}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- readConcern: {level: 'snapshot'},
- autocommit: false,
- startTransaction: true
- }));
- assert.commandWorked(conn.adminCommand({
- prepareTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority'},
- }));
- });
-
- txnNumber++;
- assert.commandWorked(priConn.getDB('test').runCommand({
- insert: 'user',
- documents: [{_id: 140}, {_id: 150}],
- ordered: false,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- readConcern: {level: 'snapshot'},
- autocommit: false,
- startTransaction: true
- }));
- const prepareTS = assert
- .commandWorked(priConn.adminCommand({
- prepareTransaction: 1,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority'},
- }))
- .prepareTimestamp;
- runWriteConcernRetryabilityTest(priConn,
- secConn,
- {
- commitTransaction: 1,
- commitTimestamp: prepareTS,
- lsid: {id: lsid},
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- writeConcern: {w: 'majority', wtimeout: 200},
- },
- kNodes,
- 'admin');
-
- replTest.stopSet();
+"use strict";
+
+load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/libs/feature_compatibility_version.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+const kNodes = 2;
+
+let replTest = new ReplSetTest({nodes: kNodes});
+replTest.startSet({verbose: 1});
+replTest.initiate();
+
+let priConn = replTest.getPrimary();
+let secConn = replTest.getSecondary();
+
+// Stopping replication on secondaries can take up to 5 seconds normally. Set a small oplog
+// getMore timeout so the test runs faster.
+assert.commandWorked(
+ secConn.adminCommand({configureFailPoint: 'setSmallOplogGetMoreMaxTimeMS', mode: 'alwaysOn'}));
+
+let lsid = UUID();
+
+// Start at an arbitrary txnNumber.
+let txnNumber = 31;
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ insert: 'user',
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes);
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ update: 'user',
+ updates: [
+ {q: {_id: 10}, u: {$inc: {x: 1}}},
+ ],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes);
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ delete: 'user',
+ deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes);
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ findAndModify: 'user',
+ query: {_id: 60},
+ update: {$inc: {x: 1}},
+ new: true,
+ upsert: true,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes);
+
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ setFeatureCompatibilityVersion: lastStableFCV,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin');
+assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: lastStableFCV}));
+checkFCV(priConn.getDB('admin'), lastStableFCV);
+
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ setFeatureCompatibilityVersion: latestFCV,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin');
+assert.commandWorked(priConn.adminCommand({setFeatureCompatibilityVersion: latestFCV}));
+checkFCV(priConn.getDB('admin'), latestFCV);
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ commitTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin',
+ function(conn) {
+ assert.commandWorked(conn.getDB('test').runCommand({
+ insert: 'user',
+ documents: [{_id: 80}, {_id: 90}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ readConcern: {level: 'snapshot'},
+ autocommit: false,
+ startTransaction: true
+ }));
+ });
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ prepareTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin',
+ function(conn) {
+ assert.commandWorked(conn.getDB('test').runCommand({
+ insert: 'user',
+ documents: [{_id: 100}, {_id: 110}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ readConcern: {level: 'snapshot'},
+ autocommit: false,
+ startTransaction: true
+ }));
+ });
+assert.commandWorked(priConn.adminCommand({
+ abortTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority'},
+}));
+
+txnNumber++;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ abortTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin',
+ function(conn) {
+ assert.commandWorked(conn.getDB('test').runCommand({
+ insert: 'user',
+ documents: [{_id: 120}, {_id: 130}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ readConcern: {level: 'snapshot'},
+ autocommit: false,
+ startTransaction: true
+ }));
+ assert.commandWorked(conn.adminCommand({
+ prepareTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority'},
+ }));
+ });
+
+txnNumber++;
+assert.commandWorked(priConn.getDB('test').runCommand({
+ insert: 'user',
+ documents: [{_id: 140}, {_id: 150}],
+ ordered: false,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ readConcern: {level: 'snapshot'},
+ autocommit: false,
+ startTransaction: true
+}));
+const prepareTS = assert
+ .commandWorked(priConn.adminCommand({
+ prepareTransaction: 1,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority'},
+ }))
+ .prepareTimestamp;
+runWriteConcernRetryabilityTest(priConn,
+ secConn,
+ {
+ commitTransaction: 1,
+ commitTimestamp: prepareTS,
+ lsid: {id: lsid},
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+ writeConcern: {w: 'majority', wtimeout: 200},
+ },
+ kNodes,
+ 'admin');
+
+replTest.stopSet();
})();
diff --git a/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js b/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js
index 0a89dcc7390..54e826dcfeb 100644
--- a/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js
+++ b/jstests/replsets/retryable_writes_direct_write_to_config_transactions.js
@@ -1,93 +1,93 @@
// Validates the expected behaviour of direct writes against the `config.transactions` collection
(function() {
- 'use strict';
-
- // Direct writes to config.transactions cannot be part of a session.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- var replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- var priConn = replTest.getPrimary();
- var db = priConn.getDB('TestDB');
- var config = priConn.getDB('config');
-
- assert.writeOK(db.user.insert({_id: 0}));
- assert.writeOK(db.user.insert({_id: 1}));
-
- const lsid1 = UUID();
- const lsid2 = UUID();
-
- const cmdObj1 = {
- update: 'user',
- updates: [{q: {_id: 0}, u: {$inc: {x: 1}}}],
- lsid: {id: lsid1},
- txnNumber: NumberLong(1)
- };
- assert.commandWorked(db.runCommand(cmdObj1));
- assert.eq(1, db.user.find({_id: 0}).toArray()[0].x);
-
- const cmdObj2 = {
- update: 'user',
- updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}],
- lsid: {id: lsid2},
- txnNumber: NumberLong(1)
- };
- assert.commandWorked(db.runCommand(cmdObj2));
- assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
-
- assert.eq(1, config.transactions.find({'_id.id': lsid1}).itcount());
- assert.eq(1, config.transactions.find({'_id.id': lsid2}).itcount());
-
- // Invalidating lsid1 doesn't impact lsid2, but allows same statement to be executed again
- assert.writeOK(config.transactions.remove({'_id.id': lsid1}));
- assert.commandWorked(db.runCommand(cmdObj1));
- assert.eq(2, db.user.find({_id: 0}).toArray()[0].x);
- assert.commandWorked(db.runCommand(cmdObj2));
- assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
-
- // Ensure lsid1 is properly tracked after the recreate
- assert.commandWorked(db.runCommand(cmdObj1));
- assert.eq(2, db.user.find({_id: 0}).toArray()[0].x);
-
- // Ensure garbage data cannot be written to the `config.transactions` collection
- assert.writeError(config.transactions.insert({_id: 'String'}));
- assert.writeError(config.transactions.insert({_id: {UnknownField: 'Garbage'}}));
-
- // Ensure inserting an invalid session record manually without all the required fields causes
- // the session to not work anymore for retryable writes for that session, but not for any other
- const lsidManual = config.transactions.find({'_id.id': lsid1}).toArray()[0]._id;
- assert.writeOK(config.transactions.remove({'_id.id': lsid1}));
- assert.writeOK(config.transactions.insert({_id: lsidManual}));
-
- const lsid3 = UUID();
- assert.commandWorked(db.runCommand({
- update: 'user',
- updates: [{q: {_id: 2}, u: {$inc: {x: 1}}, upsert: true}],
- lsid: {id: lsid3},
- txnNumber: NumberLong(1)
- }));
- assert.eq(1, db.user.find({_id: 2}).toArray()[0].x);
-
- // Ensure dropping the `config.transactions` collection breaks the retryable writes feature, but
- // doesn't crash the server
- assert(config.transactions.drop());
- var res = assert.commandWorkedIgnoringWriteErrors(db.runCommand(cmdObj2));
- assert.eq(0, res.nModified);
- assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
-
- assert(config.dropDatabase());
- res = assert.commandWorkedIgnoringWriteErrors(db.runCommand(cmdObj2));
- assert.eq(0, res.nModified);
- assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
-
- replTest.stopSet();
+'use strict';
+
+// Direct writes to config.transactions cannot be part of a session.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+var replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+var priConn = replTest.getPrimary();
+var db = priConn.getDB('TestDB');
+var config = priConn.getDB('config');
+
+assert.writeOK(db.user.insert({_id: 0}));
+assert.writeOK(db.user.insert({_id: 1}));
+
+const lsid1 = UUID();
+const lsid2 = UUID();
+
+const cmdObj1 = {
+ update: 'user',
+ updates: [{q: {_id: 0}, u: {$inc: {x: 1}}}],
+ lsid: {id: lsid1},
+ txnNumber: NumberLong(1)
+};
+assert.commandWorked(db.runCommand(cmdObj1));
+assert.eq(1, db.user.find({_id: 0}).toArray()[0].x);
+
+const cmdObj2 = {
+ update: 'user',
+ updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}],
+ lsid: {id: lsid2},
+ txnNumber: NumberLong(1)
+};
+assert.commandWorked(db.runCommand(cmdObj2));
+assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
+
+assert.eq(1, config.transactions.find({'_id.id': lsid1}).itcount());
+assert.eq(1, config.transactions.find({'_id.id': lsid2}).itcount());
+
+// Invalidating lsid1 doesn't impact lsid2, but allows the same statement to be executed again
+assert.writeOK(config.transactions.remove({'_id.id': lsid1}));
+assert.commandWorked(db.runCommand(cmdObj1));
+assert.eq(2, db.user.find({_id: 0}).toArray()[0].x);
+assert.commandWorked(db.runCommand(cmdObj2));
+assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
+
+// Ensure lsid1 is properly tracked after the recreate
+assert.commandWorked(db.runCommand(cmdObj1));
+assert.eq(2, db.user.find({_id: 0}).toArray()[0].x);
+
+// Ensure garbage data cannot be written to the `config.transactions` collection
+assert.writeError(config.transactions.insert({_id: 'String'}));
+assert.writeError(config.transactions.insert({_id: {UnknownField: 'Garbage'}}));
+
+// Ensure that manually inserting an invalid session record without all the required fields
+// breaks retryable writes for that session, but not for any other
+const lsidManual = config.transactions.find({'_id.id': lsid1}).toArray()[0]._id;
+assert.writeOK(config.transactions.remove({'_id.id': lsid1}));
+assert.writeOK(config.transactions.insert({_id: lsidManual}));
+
+const lsid3 = UUID();
+assert.commandWorked(db.runCommand({
+ update: 'user',
+ updates: [{q: {_id: 2}, u: {$inc: {x: 1}}, upsert: true}],
+ lsid: {id: lsid3},
+ txnNumber: NumberLong(1)
+}));
+assert.eq(1, db.user.find({_id: 2}).toArray()[0].x);
+
+// Ensure dropping the `config.transactions` collection breaks the retryable writes feature, but
+// doesn't crash the server
+assert(config.transactions.drop());
+var res = assert.commandWorkedIgnoringWriteErrors(db.runCommand(cmdObj2));
+assert.eq(0, res.nModified);
+assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
+
+assert(config.dropDatabase());
+res = assert.commandWorkedIgnoringWriteErrors(db.runCommand(cmdObj2));
+assert.eq(0, res.nModified);
+assert.eq(1, db.user.find({_id: 1}).toArray()[0].x);
+
+replTest.stopSet();
})();
diff --git a/jstests/replsets/retryable_writes_failover.js b/jstests/replsets/retryable_writes_failover.js
index 30eb069906d..2073e2fbded 100644
--- a/jstests/replsets/retryable_writes_failover.js
+++ b/jstests/replsets/retryable_writes_failover.js
@@ -3,162 +3,161 @@
* failover.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- function stepDownPrimary(replTest) {
- assert.commandWorked(
- replTest.getPrimary().adminCommand({replSetStepDown: 10, force: true}));
- }
+function stepDownPrimary(replTest) {
+ assert.commandWorked(replTest.getPrimary().adminCommand({replSetStepDown: 10, force: true}));
+}
- const replTest = new ReplSetTest({nodes: 3});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 3});
+replTest.startSet();
+replTest.initiate();
- ////////////////////////////////////////////////////////////////////////
- // Test insert command
+////////////////////////////////////////////////////////////////////////
+// Test insert command
- let insertCmd = {
- insert: "foo",
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(5)
- };
+let insertCmd = {
+ insert: "foo",
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(5)
+};
- // Run the command on the primary and wait for replication.
- let primary = replTest.getPrimary();
- let testDB = primary.getDB("test");
+// Run the command on the primary and wait for replication.
+let primary = replTest.getPrimary();
+let testDB = primary.getDB("test");
- let result = assert.commandWorked(testDB.runCommand(insertCmd));
- assert.eq(2, testDB.foo.find().itcount());
+let result = assert.commandWorked(testDB.runCommand(insertCmd));
+assert.eq(2, testDB.foo.find().itcount());
- replTest.awaitReplication();
+replTest.awaitReplication();
- // Step down the primary and wait for a new one.
- stepDownPrimary(replTest);
+// Step down the primary and wait for a new one.
+stepDownPrimary(replTest);
- let newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB("test");
+let newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB("test");
- let oplog = newPrimary.getDB("local").oplog.rs;
- let insertOplogEntries = oplog.find({ns: "test.foo", op: "i"}).itcount();
+let oplog = newPrimary.getDB("local").oplog.rs;
+let insertOplogEntries = oplog.find({ns: "test.foo", op: "i"}).itcount();
- // Retry the command on the secondary and verify it wasn't repeated.
- let retryResult = assert.commandWorked(testDB.runCommand(insertCmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+// Retry the command on the secondary and verify it wasn't repeated.
+let retryResult = assert.commandWorked(testDB.runCommand(insertCmd));
+assert.eq(result.ok, retryResult.ok);
+assert.eq(result.n, retryResult.n);
+assert.eq(result.writeErrors, retryResult.writeErrors);
+assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- assert.eq(2, testDB.foo.find().itcount());
+assert.eq(2, testDB.foo.find().itcount());
- assert.eq(insertOplogEntries, oplog.find({ns: "test.foo", op: "i"}).itcount());
+assert.eq(insertOplogEntries, oplog.find({ns: "test.foo", op: "i"}).itcount());
- ////////////////////////////////////////////////////////////////////////
- // Test update command
+////////////////////////////////////////////////////////////////////////
+// Test update command
- let updateCmd = {
- update: "foo",
- updates: [
- {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
- {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
- {q: {_id: 30}, u: {z: 1}} // replacement
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(10),
- };
+let updateCmd = {
+ update: "foo",
+ updates: [
+ {q: {_id: 10}, u: {$inc: {x: 1}}}, // in place
+ {q: {_id: 20}, u: {$inc: {y: 1}}, upsert: true},
+ {q: {_id: 30}, u: {z: 1}} // replacement
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(10),
+};
- primary = replTest.getPrimary();
- testDB = primary.getDB("test");
+primary = replTest.getPrimary();
+testDB = primary.getDB("test");
- // Run the command on the primary and wait for replication.
- result = assert.commandWorked(testDB.runCommand(updateCmd));
- assert.eq(3, testDB.foo.find().itcount());
+// Run the command on the primary and wait for replication.
+result = assert.commandWorked(testDB.runCommand(updateCmd));
+assert.eq(3, testDB.foo.find().itcount());
- replTest.awaitReplication();
+replTest.awaitReplication();
- // Step down the primary and wait for a new one.
- stepDownPrimary(replTest);
+// Step down the primary and wait for a new one.
+stepDownPrimary(replTest);
- newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB("test");
+newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB("test");
- oplog = newPrimary.getDB("local").oplog.rs;
- let updateOplogEntries = oplog.find({ns: "test.foo", op: "u"}).itcount();
+oplog = newPrimary.getDB("local").oplog.rs;
+let updateOplogEntries = oplog.find({ns: "test.foo", op: "u"}).itcount();
- // Upserts are stored as inserts if they match no existing documents.
- insertOplogEntries = oplog.find({ns: "test.foo", op: "i"}).itcount();
+// Upserts are stored as inserts if they match no existing documents.
+insertOplogEntries = oplog.find({ns: "test.foo", op: "i"}).itcount();
- // Retry the command on the secondary and verify it wasn't repeated.
- retryResult = assert.commandWorked(testDB.runCommand(updateCmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.nModified, retryResult.nModified);
- assert.eq(result.upserted, retryResult.upserted);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+// Retry the command on the secondary and verify it wasn't repeated.
+retryResult = assert.commandWorked(testDB.runCommand(updateCmd));
+assert.eq(result.ok, retryResult.ok);
+assert.eq(result.n, retryResult.n);
+assert.eq(result.nModified, retryResult.nModified);
+assert.eq(result.upserted, retryResult.upserted);
+assert.eq(result.writeErrors, retryResult.writeErrors);
+assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- assert.eq(3, testDB.foo.find().itcount());
+assert.eq(3, testDB.foo.find().itcount());
- assert.eq({_id: 10, x: 1}, testDB.foo.findOne({_id: 10}));
- assert.eq({_id: 20, y: 1}, testDB.foo.findOne({_id: 20}));
- assert.eq({_id: 30, z: 1}, testDB.foo.findOne({_id: 30}));
+assert.eq({_id: 10, x: 1}, testDB.foo.findOne({_id: 10}));
+assert.eq({_id: 20, y: 1}, testDB.foo.findOne({_id: 20}));
+assert.eq({_id: 30, z: 1}, testDB.foo.findOne({_id: 30}));
- assert.eq(updateOplogEntries, oplog.find({ns: "test.foo", op: "u"}).itcount());
- assert.eq(insertOplogEntries, oplog.find({ns: "test.foo", op: "i"}).itcount());
+assert.eq(updateOplogEntries, oplog.find({ns: "test.foo", op: "u"}).itcount());
+assert.eq(insertOplogEntries, oplog.find({ns: "test.foo", op: "i"}).itcount());
- ////////////////////////////////////////////////////////////////////////
- // Test delete command
+////////////////////////////////////////////////////////////////////////
+// Test delete command
- let deleteCmd = {
- delete: "foo",
- deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(15),
- };
+let deleteCmd = {
+ delete: "foo",
+ deletes: [{q: {x: 1}, limit: 1}, {q: {y: 1}, limit: 1}],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(15),
+};
- primary = replTest.getPrimary();
- testDB = primary.getDB("test");
+primary = replTest.getPrimary();
+testDB = primary.getDB("test");
- assert.writeOK(testDB.foo.insert({_id: 40, x: 1}));
- assert.writeOK(testDB.foo.insert({_id: 50, y: 1}));
+assert.writeOK(testDB.foo.insert({_id: 40, x: 1}));
+assert.writeOK(testDB.foo.insert({_id: 50, y: 1}));
- // Run the command on the primary and wait for replication.
- result = assert.commandWorked(testDB.runCommand(deleteCmd));
- assert.eq(1, testDB.foo.find({x: 1}).itcount());
- assert.eq(1, testDB.foo.find({y: 1}).itcount());
+// Run the command on the primary and wait for replication.
+result = assert.commandWorked(testDB.runCommand(deleteCmd));
+assert.eq(1, testDB.foo.find({x: 1}).itcount());
+assert.eq(1, testDB.foo.find({y: 1}).itcount());
- replTest.awaitReplication();
+replTest.awaitReplication();
- // Step down the primary and wait for a new one.
- stepDownPrimary(replTest);
+// Step down the primary and wait for a new one.
+stepDownPrimary(replTest);
- newPrimary = replTest.getPrimary();
- testDB = newPrimary.getDB("test");
+newPrimary = replTest.getPrimary();
+testDB = newPrimary.getDB("test");
- oplog = newPrimary.getDB("local").oplog.rs;
- let deleteOplogEntries = oplog.find({ns: "test.foo", op: "d"}).itcount();
+oplog = newPrimary.getDB("local").oplog.rs;
+let deleteOplogEntries = oplog.find({ns: "test.foo", op: "d"}).itcount();
- // Retry the command on the secondary and verify it wasn't repeated.
- retryResult = assert.commandWorked(testDB.runCommand(deleteCmd));
- assert.eq(result.ok, retryResult.ok);
- assert.eq(result.n, retryResult.n);
- assert.eq(result.writeErrors, retryResult.writeErrors);
- assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
+// Retry the command on the secondary and verify it wasn't repeated.
+retryResult = assert.commandWorked(testDB.runCommand(deleteCmd));
+assert.eq(result.ok, retryResult.ok);
+assert.eq(result.n, retryResult.n);
+assert.eq(result.writeErrors, retryResult.writeErrors);
+assert.eq(result.writeConcernErrors, retryResult.writeConcernErrors);
- assert.eq(1, testDB.foo.find({x: 1}).itcount());
- assert.eq(1, testDB.foo.find({y: 1}).itcount());
+assert.eq(1, testDB.foo.find({x: 1}).itcount());
+assert.eq(1, testDB.foo.find({y: 1}).itcount());
- assert.eq(deleteOplogEntries, oplog.find({ns: "test.foo", op: "d"}).itcount());
+assert.eq(deleteOplogEntries, oplog.find({ns: "test.foo", op: "d"}).itcount());
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/rollback_aborted_prepared_transaction.js b/jstests/replsets/rollback_aborted_prepared_transaction.js
index 8a486323421..b8bdc857992 100644
--- a/jstests/replsets/rollback_aborted_prepared_transaction.js
+++ b/jstests/replsets/rollback_aborted_prepared_transaction.js
@@ -8,101 +8,101 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
-
- const dbName = "test";
- const collName = "rollback_aborted_prepared_transaction";
-
- const rollbackTest = new RollbackTest(dbName);
- let primary = rollbackTest.getPrimary();
-
- // Create collection we're using beforehand.
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
-
- testDB.runCommand({drop: collName});
- assert.commandWorked(testDB.runCommand({create: collName}));
- assert.commandWorked(testColl.insert({_id: 0}));
-
- // Start two sessions on the primary.
- let session = primary.startSession();
- const sessionID = session.getSessionId();
- let sessionDB = session.getDatabase(dbName);
- let sessionColl = sessionDB.getCollection(collName);
-
- let session2 = primary.startSession();
- let sessionColl2 = session2.getDatabase(dbName).getCollection(collName);
-
- // The following transaction will be rolled back.
- rollbackTest.transitionToRollbackOperations();
-
- // Prepare the transaction on the session.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- PrepareHelpers.prepareTransaction(session, {w: 1});
-
- assert.eq(testColl.find().itcount(), 1);
- // This characterizes the current fastcount behavior, which is that active prepared transactions
- // contribute to the fastcount.
- assert.eq(testColl.count(), 2);
-
- // Abort the transaction explicitly.
- assert.commandWorked(session.abortTransaction_forTesting());
-
- assert.eq(testColl.find().itcount(), 1);
- assert.eq(testColl.count(), 1);
-
- // Test that it is impossible to commit a prepared transaction whose prepare oplog entry has not
- // yet majority committed. This also aborts the transaction.
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({_id: 2}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session2, {w: 1});
- let res = assert.commandFailedWithCode(
- PrepareHelpers.commitTransaction(session2, prepareTimestamp), ErrorCodes.InvalidOptions);
- assert(res.errmsg.includes(
- "cannot be run before its prepare oplog entry has been majority committed"),
- res);
- assert.eq(testColl.find().itcount(), 1);
- assert.eq(testColl.count(), 1);
-
- // Check that we have two transactions in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
-
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Make sure there are no transactions in the transactions table. This is because both the abort
- // and prepare operations are rolled back, and the entry in the transactions table is only made
- // durable when a transaction is prepared.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 0);
-
- // Make sure the first collection only has one document since the prepared insert was rolled
- // back.
- assert.eq(sessionColl.find().itcount(), 1);
- assert.eq(sessionColl.count(), 1);
-
- // Get the new primary after the topology changes.
- primary = rollbackTest.getPrimary();
- testDB = primary.getDB(dbName);
- testColl = testDB.getCollection(collName);
-
- // Make sure we can successfully run a prepared transaction on the same session after going
- // through rollback.
- session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
- sessionDB = session.getDatabase(dbName);
- sessionColl = sessionDB.getCollection(collName);
-
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
-
- assert.eq(testColl.find().itcount(), 2);
- assert.eq(testColl.count(), 2);
-
- rollbackTest.stop();
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
+
+const dbName = "test";
+const collName = "rollback_aborted_prepared_transaction";
+
+const rollbackTest = new RollbackTest(dbName);
+let primary = rollbackTest.getPrimary();
+
+// Create collection we're using beforehand.
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
+
+testDB.runCommand({drop: collName});
+assert.commandWorked(testDB.runCommand({create: collName}));
+assert.commandWorked(testColl.insert({_id: 0}));
+
+// Start two sessions on the primary.
+let session = primary.startSession();
+const sessionID = session.getSessionId();
+let sessionDB = session.getDatabase(dbName);
+let sessionColl = sessionDB.getCollection(collName);
+
+let session2 = primary.startSession();
+let sessionColl2 = session2.getDatabase(dbName).getCollection(collName);
+
+// The following transaction will be rolled back.
+rollbackTest.transitionToRollbackOperations();
+
+// Prepare the transaction on the session.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+PrepareHelpers.prepareTransaction(session, {w: 1});
+
+assert.eq(testColl.find().itcount(), 1);
+// This characterizes the current fastcount behavior, which is that active prepared transactions
+// contribute to the fastcount.
+assert.eq(testColl.count(), 2);
+
+// Abort the transaction explicitly.
+assert.commandWorked(session.abortTransaction_forTesting());
+
+assert.eq(testColl.find().itcount(), 1);
+assert.eq(testColl.count(), 1);
+
+// Test that it is impossible to commit a prepared transaction whose prepare oplog entry has not
+// yet been majority committed. This also aborts the transaction.
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({_id: 2}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session2, {w: 1});
+let res = assert.commandFailedWithCode(PrepareHelpers.commitTransaction(session2, prepareTimestamp),
+ ErrorCodes.InvalidOptions);
+assert(
+ res.errmsg.includes("cannot be run before its prepare oplog entry has been majority committed"),
+ res);
+assert.eq(testColl.find().itcount(), 1);
+assert.eq(testColl.count(), 1);
+
+// Check that we have two transactions in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
+
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+// Make sure there are no transactions in the transactions table. This is because both the abort
+// and prepare operations are rolled back, and the entry in the transactions table is only made
+// durable when a transaction is prepared.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 0);
+
+// Make sure the collection still has only one document, since the prepared insert was rolled
+// back.
+assert.eq(sessionColl.find().itcount(), 1);
+assert.eq(sessionColl.count(), 1);
+
+// Get the new primary after the topology changes.
+primary = rollbackTest.getPrimary();
+testDB = primary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+
+// Make sure we can successfully run a prepared transaction on the same session after going
+// through rollback.
+session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
+sessionDB = session.getDatabase(dbName);
+sessionColl = sessionDB.getCollection(collName);
+
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+PrepareHelpers.commitTransaction(session, prepareTimestamp);
+
+assert.eq(testColl.find().itcount(), 2);
+assert.eq(testColl.count(), 2);
+
+rollbackTest.stop();
}());
diff --git a/jstests/replsets/rollback_after_disabling_majority_reads.js b/jstests/replsets/rollback_after_disabling_majority_reads.js
index 159bbdffb88..e8b2eeeebba 100644
--- a/jstests/replsets/rollback_after_disabling_majority_reads.js
+++ b/jstests/replsets/rollback_after_disabling_majority_reads.js
@@ -3,41 +3,43 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- TestData.rollbackShutdowns = true;
- const name = "rollback_after_disabling_majority_reads";
- const dbName = "test";
- const collName = "coll";
+TestData.rollbackShutdowns = true;
+const name = "rollback_after_disabling_majority_reads";
+const dbName = "test";
+const collName = "coll";
- jsTest.log("Set up a Rollback Test with enableMajorityReadConcern=true");
- const replTest = new ReplSetTest(
- {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "true"}});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- replTest.initiate(config);
- const rollbackTest = new RollbackTest(name, replTest);
+jsTest.log("Set up a Rollback Test with enableMajorityReadConcern=true");
+const replTest = new ReplSetTest(
+ {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "true"}});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+config.settings = {
+ chainingAllowed: false
+};
+replTest.initiate(config);
+const rollbackTest = new RollbackTest(name, replTest);
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- assert.commandWorked(rollbackNode.getDB(dbName).runCommand(
- {insert: collName, documents: [{_id: "rollback op"}]}));
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+assert.commandWorked(
+ rollbackNode.getDB(dbName).runCommand({insert: collName, documents: [{_id: "rollback op"}]}));
- jsTest.log("Restart the rollback node with enableMajorityReadConcern=false");
- rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "false"});
+jsTest.log("Restart the rollback node with enableMajorityReadConcern=false");
+rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "false"});
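+// Added commentary (hedged): restartNode's second argument (15) is the shutdown
+// signal, i.e. SIGTERM, so the node is stopped cleanly before coming back up with
+// the new enableMajorityReadConcern setting.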
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert(
- {_id: "steady state op"}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert(
+ {_id: "steady state op"}, {writeConcern: {w: "majority"}}));
- assert.eq(0, rollbackNode.getDB(dbName)[collName].find({_id: "rollback op"}).itcount());
- assert.eq(1, rollbackNode.getDB(dbName)[collName].find({_id: "steady state op"}).itcount());
+assert.eq(0, rollbackNode.getDB(dbName)[collName].find({_id: "rollback op"}).itcount());
+assert.eq(1, rollbackNode.getDB(dbName)[collName].find({_id: "steady state op"}).itcount());
- rollbackTest.stop();
+rollbackTest.stop();
}()); \ No newline at end of file
diff --git a/jstests/replsets/rollback_after_enabling_majority_reads.js b/jstests/replsets/rollback_after_enabling_majority_reads.js
index a2b2ae6328d..6f33afa7138 100644
--- a/jstests/replsets/rollback_after_enabling_majority_reads.js
+++ b/jstests/replsets/rollback_after_enabling_majority_reads.js
@@ -8,73 +8,75 @@
* @tags: [requires_persistence]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- TestData.rollbackShutdowns = true;
- const name = "rollback_after_enabling_majority_reads";
- const dbName = "test";
- const collName = "coll";
+TestData.rollbackShutdowns = true;
+const name = "rollback_after_enabling_majority_reads";
+const dbName = "test";
+const collName = "coll";
- jsTest.log("Set up a Rollback Test with enableMajorityReadConcern=false");
- const replTest = new ReplSetTest(
- {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- replTest.initiate(config);
- const rollbackTest = new RollbackTest(name, replTest);
+jsTest.log("Set up a Rollback Test with enableMajorityReadConcern=false");
+const replTest = new ReplSetTest(
+ {name, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+config.settings = {
+ chainingAllowed: false
+};
+replTest.initiate(config);
+const rollbackTest = new RollbackTest(name, replTest);
- jsTest.log("Ensure the stable timestamp is ahead of the common point on the rollback node.");
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- const operationTime = assert
- .commandWorked(rollbackNode.getDB(dbName).runCommand(
- {insert: collName, documents: [{_id: "rollback op"}]}))
- .operationTime;
+jsTest.log("Ensure the stable timestamp is ahead of the common point on the rollback node.");
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+const operationTime = assert
+ .commandWorked(rollbackNode.getDB(dbName).runCommand(
+ {insert: collName, documents: [{_id: "rollback op"}]}))
+ .operationTime;
- // Do a clean shutdown to ensure the recovery timestamp is at operationTime.
- jsTest.log("Restart the rollback node with enableMajorityReadConcern=true");
- rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "true"});
- const replSetGetStatusResponse =
- assert.commandWorked(rollbackNode.adminCommand({replSetGetStatus: 1}));
- assert.eq(replSetGetStatusResponse.lastStableRecoveryTimestamp,
- operationTime,
- tojson(replSetGetStatusResponse));
+// Do a clean shutdown to ensure the recovery timestamp is at operationTime.
+jsTest.log("Restart the rollback node with enableMajorityReadConcern=true");
+rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "true"});
+const replSetGetStatusResponse =
+ assert.commandWorked(rollbackNode.adminCommand({replSetGetStatus: 1}));
+assert.eq(replSetGetStatusResponse.lastStableRecoveryTimestamp,
+ operationTime,
+ tojson(replSetGetStatusResponse));
- // The rollback crashes because the common point is before the stable timestamp.
- jsTest.log("Attempt to roll back. This will fassert.");
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- assert.soon(() => {
- return rawMongoProgramOutput().indexOf("Fatal Assertion 51121") !== -1;
- });
+// The rollback crashes because the common point is before the stable timestamp.
+jsTest.log("Attempt to roll back. This will fassert.");
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+assert.soon(() => {
+ return rawMongoProgramOutput().indexOf("Fatal Assertion 51121") !== -1;
+});
- jsTest.log(
- "Restart the rollback node with enableMajorityReadConcern=false. Now the rollback can succeed.");
- const allowedExitCode = 14;
- rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "false"}, allowedExitCode);
+jsTest.log(
+ "Restart the rollback node with enableMajorityReadConcern=false. Now the rollback can succeed.");
+const allowedExitCode = 14;
+rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "false"}, allowedExitCode);
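+// Added commentary (hedged): exit code 14 is mongod's abrupt-exit code produced by
+// the fassert above, so the restart helper is told to treat it as expected.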
- // Fix counts for "local.startup_log", since they are corrupted by this rollback.
- // transitionToSteadyStateOperations() checks collection counts.
- assert.commandWorked(rollbackNode.getDB("local").runCommand({validate: "startup_log"}));
- rollbackTest.transitionToSteadyStateOperations();
+// Fix counts for "local.startup_log", since they are corrupted by this rollback.
+// transitionToSteadyStateOperations() checks collection counts.
+assert.commandWorked(rollbackNode.getDB("local").runCommand({validate: "startup_log"}));
+rollbackTest.transitionToSteadyStateOperations();
- assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert(
- {_id: "steady state op"}, {writeConcern: {w: "majority"}}));
+assert.commandWorked(rollbackTest.getPrimary().getDB(dbName)[collName].insert(
+ {_id: "steady state op"}, {writeConcern: {w: "majority"}}));
- assert.eq(0, rollbackNode.getDB(dbName)[collName].find({_id: "rollback op"}).itcount());
- assert.eq(1, rollbackNode.getDB(dbName)[collName].find({_id: "steady state op"}).itcount());
+assert.eq(0, rollbackNode.getDB(dbName)[collName].find({_id: "rollback op"}).itcount());
+assert.eq(1, rollbackNode.getDB(dbName)[collName].find({_id: "steady state op"}).itcount());
- jsTest.log("Restart the rollback node with enableMajorityReadConcern=true.");
- rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "true"});
+jsTest.log("Restart the rollback node with enableMajorityReadConcern=true.");
+rollbackTest.restartNode(0, 15, {enableMajorityReadConcern: "true"});
- jsTest.log("Rollback should succeed since the common point is at least the stable timestamp.");
- rollbackTest.transitionToRollbackOperations();
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+jsTest.log("Rollback should succeed since the common point is at least the stable timestamp.");
+rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- rollbackTest.stop();
+rollbackTest.stop();
}()); \ No newline at end of file
diff --git a/jstests/replsets/rollback_all_op_types.js b/jstests/replsets/rollback_all_op_types.js
index 7af1a3c654d..8ffc53f2faf 100644
--- a/jstests/replsets/rollback_all_op_types.js
+++ b/jstests/replsets/rollback_all_op_types.js
@@ -10,374 +10,372 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test_deluxe.js");
+load("jstests/replsets/libs/rollback_test_deluxe.js");
- let noOp = () => {};
+let noOp = () => {};
- /**
- * All operation types that are able to be rolled back.
- *
- * Each operation type maps to an array of test objects that contains an 'init' function, an
- * 'op' function, and an optional 'description' field. Some operations depend on the current
- * state of the database, so the 'init' function provides a way to set up the database before an
- * operation is executed. All init functions are executed at the very beginning of the test, as
- * part of CommonOps. Also, to provide isolation between commands, each is given its own
- * database to execute in.
- *
- * Each operation has an array of test objects to allow testing of multiple variations of an
- * operation. Each test case in an array will be executed in isolation.
- *
- * Note: The 'dropDatabase' command is excluded and tested separately. It cannot be tested
- * directly using the RollbackTest fixture, since the command is always up-converted to use
- * majority write concern in 3.6.
- *
- */
- let rollbackOps = {
- "insert": [{
+/**
+ * All operation types that can be rolled back.
+ *
+ * Each operation type maps to an array of test objects, each of which contains an 'init'
+ * function, an 'op' function, and an optional 'description' field. Some operations depend on
+ * the current state of the database, so the 'init' function provides a way to set up the
+ * database before an operation is executed. All init functions are executed at the very
+ * beginning of the test, as part of CommonOps. Also, to provide isolation between commands,
+ * each is given its own database to execute in.
+ *
+ * Each operation has an array of test objects to allow testing of multiple variations of an
+ * operation. Each test case in an array will be executed in isolation.
+ *
+ * Note: The 'dropDatabase' command is excluded and tested separately. It cannot be tested
+ * directly using the RollbackTest fixture, since the command is always up-converted to use
+ * majority write concern in 3.6.
+ */
+let rollbackOps = {
+ "insert": [{
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ },
+ op: (db, collName) => {
+ assert.writeOK(db[collName].insert({_id: 0}));
+ }
+ }],
+ "update": [{
+ init: (db, collName) => {
+ assert.writeOK(db[collName].insert({_id: 0, val: 0}));
+ },
+ op: (db, collName) => {
+ assert.writeOK(db[collName].update({_id: 0}, {val: 1}));
+ },
+ }],
+ "delete": [{
+ init: (db, collName) => {
+ assert.writeOK(db[collName].insert({_id: 0}));
+ },
+ op: (db, collName) => {
+ assert.writeOK(db[collName].remove({_id: 0}));
+ },
+ }],
+ "create": [{
+ init: noOp,
+ op: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ },
+ }],
+ "drop": [{
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ },
+ op: (db, collName) => {
+ assert.commandWorked(db.runCommand({drop: collName}));
+ },
+ }],
+ "createIndexes": [{
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ },
+ op: (db, collName) => {
+ assert.commandWorked(db.runCommand({
+ createIndexes: collName,
+ indexes: [{name: collName + "_index", key: {index_key: 1}}]
+ }));
+ }
+ }],
+ "dropIndexes": [
+ {
+ description: "singleIndex",
init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
+ assert.commandWorked(db.runCommand({
+ createIndexes: collName,
+ indexes: [{name: collName + "_index", key: {index_key: 1}}]
+ }));
+ },
+ op: (db, collName) => {
+ assert.commandWorked(
+ db.runCommand({dropIndexes: collName, index: collName + "_index"}));
+ }
+ },
+ {
+ description: "allIndexes",
+ init: (db, collName) => {
+ assert.commandWorked(db.runCommand({
+ createIndexes: collName,
+ indexes: [
+ {name: collName + "_index_0", key: {index_key_0: 1}},
+ {name: collName + "_index_1", key: {index_key_1: 1}},
+ {name: collName + "_index_2", key: {index_key_2: 1}}
+ ]
+ }));
},
op: (db, collName) => {
- assert.writeOK(db[collName].insert({_id: 0}));
+ assert.commandWorked(db.runCommand({dropIndexes: collName, index: "*"}));
}
- }],
- "update": [{
+ }
+ ],
+ "renameCollection": [
+ {
+ description: "withinSameDatabase",
init: (db, collName) => {
- assert.writeOK(db[collName].insert({_id: 0, val: 0}));
+ assert.commandWorked(db.createCollection(collName + "_source"));
},
op: (db, collName) => {
- assert.writeOK(db[collName].update({_id: 0}, {val: 1}));
+ let nss = db[collName].getFullName();
+ assert.commandWorked(
+ db.adminCommand({renameCollection: nss + "_source", to: nss + "_dest"}));
},
- }],
- "delete": [{
+ },
+ {
+ description: "acrossDatabases",
init: (db, collName) => {
- assert.writeOK(db[collName].insert({_id: 0}));
+ assert.commandWorked(db.createCollection(collName));
},
op: (db, collName) => {
- assert.writeOK(db[collName].remove({_id: 0}));
+ let sourceNss = db[collName].getFullName();
+ let destNss = db.getName() + "_dest." + collName;
+ assert.commandWorked(db.adminCommand({renameCollection: sourceNss, to: destNss}));
},
- }],
- "create": [{
- init: noOp,
- op: (db, collName) => {
+ },
+ {
+ description: "acrossDatabasesDropTarget",
+ init: (db, collName) => {
+ let dbName = db.getName();
+ let destDb = db.getSiblingDB(dbName + "_dest");
assert.commandWorked(db.createCollection(collName));
+ assert.commandWorked(destDb.createCollection(collName));
},
- }],
- "drop": [{
+ op: (db, collName) => {
+ let sourceNss = db[collName].getFullName();
+ let destNss = db.getName() + "_dest." + collName;
+ assert.commandWorked(
+ db.adminCommand({renameCollection: sourceNss, to: destNss, dropTarget: true}));
+ },
+ },
+ {
+ description: "dropTarget",
init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
+ assert.commandWorked(db.createCollection(collName + "_source"));
+ assert.commandWorked(db.createCollection(collName + "_dest"));
},
op: (db, collName) => {
- assert.commandWorked(db.runCommand({drop: collName}));
+ let nss = db[collName].getFullName();
+ assert.commandWorked(db.adminCommand(
+ {renameCollection: nss + "_source", to: nss + "_dest", dropTarget: true}));
},
- }],
- "createIndexes": [{
+ }
+
+ ],
+ "collMod": [
+ {
+ description: "allCollectionOptions",
init: (db, collName) => {
assert.commandWorked(db.createCollection(collName));
},
op: (db, collName) => {
assert.commandWorked(db.runCommand({
- createIndexes: collName,
- indexes: [{name: collName + "_index", key: {index_key: 1}}]
+ collMod: collName,
+ validator: {a: 1},
+ validationLevel: "moderate",
+ validationAction: "warn"
}));
}
- }],
- "dropIndexes": [
- {
- description: "singleIndex",
- init: (db, collName) => {
- assert.commandWorked(db.runCommand({
- createIndexes: collName,
- indexes: [{name: collName + "_index", key: {index_key: 1}}]
- }));
- },
- op: (db, collName) => {
- assert.commandWorked(
- db.runCommand({dropIndexes: collName, index: collName + "_index"}));
- }
- },
- {
- description: "allIndexes",
- init: (db, collName) => {
- assert.commandWorked(db.runCommand({
- createIndexes: collName,
- indexes: [
- {name: collName + "_index_0", key: {index_key_0: 1}},
- {name: collName + "_index_1", key: {index_key_1: 1}},
- {name: collName + "_index_2", key: {index_key_2: 1}}
- ]
- }));
- },
- op: (db, collName) => {
- assert.commandWorked(db.runCommand({dropIndexes: collName, index: "*"}));
- }
- }
- ],
- "renameCollection": [
- {
- description: "withinSameDatabase",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName + "_source"));
- },
- op: (db, collName) => {
- let nss = db[collName].getFullName();
- assert.commandWorked(
- db.adminCommand({renameCollection: nss + "_source", to: nss + "_dest"}));
- },
- },
- {
- description: "acrossDatabases",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- },
- op: (db, collName) => {
- let sourceNss = db[collName].getFullName();
- let destNss = db.getName() + "_dest." + collName;
- assert.commandWorked(db.adminCommand({renameCollection: sourceNss, to: destNss}));
- },
- },
- {
- description: "acrossDatabasesDropTarget",
- init: (db, collName) => {
- let dbName = db.getName();
- let destDb = db.getSiblingDB(dbName + "_dest");
- assert.commandWorked(db.createCollection(collName));
- assert.commandWorked(destDb.createCollection(collName));
- },
- op: (db, collName) => {
- let sourceNss = db[collName].getFullName();
- let destNss = db.getName() + "_dest." + collName;
- assert.commandWorked(db.adminCommand(
- {renameCollection: sourceNss, to: destNss, dropTarget: true}));
- },
+ },
+ {
+ description: "validationOptionsWithoutValidator",
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
},
- {
- description: "dropTarget",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName + "_source"));
- assert.commandWorked(db.createCollection(collName + "_dest"));
- },
- op: (db, collName) => {
- let nss = db[collName].getFullName();
- assert.commandWorked(db.adminCommand(
- {renameCollection: nss + "_source", to: nss + "_dest", dropTarget: true}));
- },
+ op: (db, collName) => {
+ assert.commandWorked(db.runCommand(
+ {collMod: collName, validationLevel: "moderate", validationAction: "warn"}));
}
-
- ],
- "collMod": [
- {
- description: "allCollectionOptions",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- },
- op: (db, collName) => {
- assert.commandWorked(db.runCommand({
- collMod: collName,
- validator: {a: 1},
- validationLevel: "moderate",
- validationAction: "warn"
- }));
- }
- },
- {
- description: "validationOptionsWithoutValidator",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- },
- op: (db, collName) => {
- assert.commandWorked(db.runCommand(
- {collMod: collName, validationLevel: "moderate", validationAction: "warn"}));
- }
+ },
+ {
+ description: "existingValidationOptions",
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ assert.commandWorked(db.runCommand(
+ {collMod: collName, validationLevel: "moderate", validationAction: "warn"}));
},
- {
- description: "existingValidationOptions",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- assert.commandWorked(db.runCommand(
- {collMod: collName, validationLevel: "moderate", validationAction: "warn"}));
- },
- op: (db, collName) => {
- assert.commandWorked(db.runCommand({
- collMod: collName,
- validator: {a: 1},
- validationLevel: "moderate",
- validationAction: "warn"
- }));
- }
+ op: (db, collName) => {
+ assert.commandWorked(db.runCommand({
+ collMod: collName,
+ validator: {a: 1},
+ validationLevel: "moderate",
+ validationAction: "warn"
+ }));
}
- ],
- "convertToCapped": [{
+ }
+ ],
+ "convertToCapped": [{
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
+ },
+ op: (db, collName) => {
+ assert.commandWorked(db.runCommand({convertToCapped: collName, size: 1024}));
+ },
+ }],
+ "applyOps": [
+ {
+ description: "multipleCRUDOps",
init: (db, collName) => {
assert.commandWorked(db.createCollection(collName));
},
+ // In 3.6 only document CRUD operations are grouped into a single applyOps oplog
+ // entry.
op: (db, collName) => {
- assert.commandWorked(db.runCommand({convertToCapped: collName, size: 1024}));
- },
- }],
- "applyOps": [
- {
- description: "multipleCRUDOps",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- },
- // In 3.6 only document CRUD operations are grouped into a single applyOps oplog
- // entry.
- op: (db, collName) => {
- let collInfo = db.getCollectionInfos({name: collName})[0];
- let uuid = collInfo.info.uuid;
- let coll = db.getCollection(collName);
- let opsToApply = [
- {op: "i", ns: coll.getFullName(), ui: uuid, o: {_id: 0}},
- {
+ let collInfo = db.getCollectionInfos({name: collName})[0];
+ let uuid = collInfo.info.uuid;
+ let coll = db.getCollection(collName);
+ let opsToApply = [
+ {op: "i", ns: coll.getFullName(), ui: uuid, o: {_id: 0}},
+ {
op: "u",
ns: coll.getFullName(),
ui: uuid,
o: {_id: 0, val: 1},
o2: {_id: 0},
- },
- {op: "d", ns: coll.getFullName(), ui: uuid, o: {_id: 0}}
- ];
- assert.commandWorked(db.adminCommand({applyOps: opsToApply}));
- }
+ },
+ {op: "d", ns: coll.getFullName(), ui: uuid, o: {_id: 0}}
+ ];
+ assert.commandWorked(db.adminCommand({applyOps: opsToApply}));
+ }
+ },
+ {
+ description: "opWithoutUUID",
+ init: (db, collName) => {
+ assert.commandWorked(db.createCollection(collName));
},
- {
- description: "opWithoutUUID",
- init: (db, collName) => {
- assert.commandWorked(db.createCollection(collName));
- },
- // In 3.6 only document CRUD operations are grouped into a single applyOps oplog
- // entry.
- op: (db, collName) => {
- let coll = db.getCollection(collName);
- let opsToApply = [
- {op: "i", ns: coll.getFullName(), o: {_id: 0}},
- ];
- assert.commandWorked(db.adminCommand({applyOps: opsToApply}));
- }
+ // In 3.6 only document CRUD operations are grouped into a single applyOps oplog
+ // entry.
+ op: (db, collName) => {
+ let coll = db.getCollection(collName);
+ let opsToApply = [
+ {op: "i", ns: coll.getFullName(), o: {_id: 0}},
+ ];
+ assert.commandWorked(db.adminCommand({applyOps: opsToApply}));
}
- ]
- };
+ }
+ ]
+};
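+
+// Shape illustration (a hedged sketch, left commented out so the test's operation
+// set is unchanged): a new rollback-able operation type would plug in with the same
+// structure, e.g.
+//
+//     rollbackOps["myNewOp"] = [{
+//         init: (db, collName) => {
+//             assert.commandWorked(db.createCollection(collName));
+//         },
+//         op: (db, collName) => {
+//             assert.writeOK(db[collName].insert({plugged_in: true}));
+//         },
+//     }];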
- let testCollName = "test";
- let opNames = Object.keys(rollbackOps);
+let testCollName = "test";
+let opNames = Object.keys(rollbackOps);
- /**
- * Create the test name string given an operation name and the test case index. The test
- * name for the nth test case of an operation called "opName", with description "description",
- * will be "opName_<n>_description".
- */
- function opTestNameStr(opName, description, ind) {
- let opVariantName = opName + "_" + ind;
- if (description) {
- opVariantName = opVariantName + "_" + description;
- }
- return opVariantName;
+/**
+ * Create the test name string given an operation name and the test case index. The test
+ * name for the nth test case of an operation called "opName", with description "description",
+ * will be "opName_<n>_description".
+ */
+function opTestNameStr(opName, description, ind) {
+ let opVariantName = opName + "_" + ind;
+ if (description) {
+ opVariantName = opVariantName + "_" + description;
}
+ return opVariantName;
+}
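+
+// A quick self-check of the naming scheme documented above (added illustration, not
+// part of the original test).
+assert.eq(opTestNameStr("dropIndexes", "allIndexes", 1), "dropIndexes_1_allIndexes");
+assert.eq(opTestNameStr("insert", undefined, 0), "insert_0");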
- /**
- * Operations that will be present on both nodes, before the common point.
- */
- let CommonOps = (node) => {
- // Ensure there is at least one common op between nodes.
- node.getDB("commonOp")["test"].insert({_id: "common_op"});
+/**
+ * Operations that will be present on both nodes, before the common point.
+ */
+let CommonOps = (node) => {
+ // Ensure there is at least one common op between nodes.
+ node.getDB("commonOp")["test"].insert({_id: "common_op"});
- // Run init functions for each op type. Each is given its own database to run in and a
- // standard collection name to use.
- jsTestLog("Performing init operations for every operation type.");
- opNames.forEach(opName => {
- let opObj = rollbackOps[opName];
- opObj.forEach((opVariantObj, ind) => {
- let opVariantName = opTestNameStr(opName, opVariantObj.description, ind);
- opVariantObj.init(node.getDB(opVariantName), testCollName);
- });
+ // Run init functions for each op type. Each is given its own database to run in and a
+ // standard collection name to use.
+ jsTestLog("Performing init operations for every operation type.");
+ opNames.forEach(opName => {
+ let opObj = rollbackOps[opName];
+ opObj.forEach((opVariantObj, ind) => {
+ let opVariantName = opTestNameStr(opName, opVariantObj.description, ind);
+ opVariantObj.init(node.getDB(opVariantName), testCollName);
});
- };
-
- /**
- * Operations that will be performed on the rollback node past the common point.
- */
- let RollbackOps = (node) => {
+ });
+};
- // Returns a new object with any metadata fields from the given command object removed.
- function basicCommandObj(fullCommandObj) {
- let basicCommandObj = {};
- for (let field in fullCommandObj) {
- if (fullCommandObj.hasOwnProperty(field) && !field.startsWith("$")) {
- basicCommandObj[field] = fullCommandObj[field];
- }
+/**
+ * Operations that will be performed on the rollback node past the common point.
+ */
+let RollbackOps = (node) => {
+ // Returns a new object with any metadata fields from the given command object removed.
+ function basicCommandObj(fullCommandObj) {
+ let basicCommandObj = {};
+ for (let field in fullCommandObj) {
+ if (fullCommandObj.hasOwnProperty(field) && !field.startsWith("$")) {
+ basicCommandObj[field] = fullCommandObj[field];
}
- return basicCommandObj;
}
+ return basicCommandObj;
+ }
- // Execute the operation given by 'opFn'. 'opName' is the string identifier of the
- // operation to be executed.
- function executeOp(opName, opFn) {
- // Override 'runCommand' so we can capture the raw command object for each operation
- // and log it, to improve diagnostics.
- const runCommandOriginal = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function(dbName, commandObj, options) {
- jsTestLog("Executing command for '" + opName + "' test: \n" +
- tojson(basicCommandObj(commandObj)));
- return runCommandOriginal.apply(this, arguments);
- };
+ // Execute the operation given by 'opFn'. 'opName' is the string identifier of the
+ // operation to be executed.
+ function executeOp(opName, opFn) {
+ // Override 'runCommand' so we can capture the raw command object for each operation
+ // and log it, to improve diagnostics.
+ const runCommandOriginal = Mongo.prototype.runCommand;
+ Mongo.prototype.runCommand = function(dbName, commandObj, options) {
+ jsTestLog("Executing command for '" + opName + "' test: \n" +
+ tojson(basicCommandObj(commandObj)));
+ return runCommandOriginal.apply(this, arguments);
+ };
- opFn(node.getDB(opName), testCollName);
+ opFn(node.getDB(opName), testCollName);
- // Reset runCommand to its normal behavior.
- Mongo.prototype.runCommand = runCommandOriginal;
- }
+ // Reset runCommand to its normal behavior.
+ Mongo.prototype.runCommand = runCommandOriginal;
+ }
- jsTestLog("Performing rollback operations for every operation type.");
- opNames.forEach(opName => {
- let opObj = rollbackOps[opName];
- // Execute all test cases for this operation type.
- jsTestLog("Performing '" + opName + "' operations.");
- opObj.forEach((opVariantObj, ind) => {
- let opVariantName = opTestNameStr(opName, opVariantObj.description, ind);
- executeOp(opVariantName, opVariantObj.op);
- });
+ jsTestLog("Performing rollback operations for every operation type.");
+ opNames.forEach(opName => {
+ let opObj = rollbackOps[opName];
+ // Execute all test cases for this operation type.
+ jsTestLog("Performing '" + opName + "' operations.");
+ opObj.forEach((opVariantObj, ind) => {
+ let opVariantName = opTestNameStr(opName, opVariantObj.description, ind);
+ executeOp(opVariantName, opVariantObj.op);
});
+ });
+};
- };
-
- // Set up Rollback Test.
- let rollbackTest = new RollbackTestDeluxe();
- CommonOps(rollbackTest.getPrimary());
+// Set up Rollback Test.
+let rollbackTest = new RollbackTestDeluxe();
+CommonOps(rollbackTest.getPrimary());
- // Perform the operations that will be rolled back.
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+// Perform the operations that will be rolled back.
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- // Complete cycle one of rollback. Data consistency is checked automatically after entering
- // steady state.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+// Complete cycle one of rollback. Data consistency is checked automatically after entering
+// steady state.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- // Again, perform operations that will be rolled back. This time, each node in the replica set
- // has assumed a different role and will roll back operations that were applied in a different
- // state (e.g. as a SECONDARY as opposed to a PRIMARY).
- rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+// Again, perform operations that will be rolled back. This time, each node in the replica set
+// has assumed a different role and will roll back operations that were applied in a different
+// state (e.g. as a SECONDARY as opposed to a PRIMARY).
+rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- // Complete cycle two of rollback.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+// Complete cycle two of rollback.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- // Perform operations that will be rolled back one more time.
- rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+// Perform operations that will be rolled back one more time.
+rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- // Complete cycle three of rollback. After this cycle is completed, the replica set returns to
- // its original topology.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+// Complete cycle three of rollback. After this cycle is completed, the replica set returns to
+// its original topology.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- // Check the replica set.
- rollbackTest.stop();
+// Check the replica set.
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/rollback_auth.js b/jstests/replsets/rollback_auth.js
index d7703ea3824..e85ce9b8082 100644
--- a/jstests/replsets/rollback_auth.js
+++ b/jstests/replsets/rollback_auth.js
@@ -11,211 +11,210 @@
// @tags: [requires_persistence]
(function() {
- "use strict";
-
- // Arbiters don't replicate the admin.system.keys collection, so they can never validate or sign
- // clusterTime. Gossiping a clusterTime to an arbiter as a user other than __system will fail,
- // so we skip gossiping for this test.
- //
- // TODO SERVER-32639: remove this flag.
- TestData.skipGossipingClusterTime = true;
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- // helper function for verifying contents at the end of the test
- var checkFinalResults = function(db) {
- assert.commandWorked(db.runCommand({dbStats: 1}));
- assert.commandFailedWithCode(db.runCommand({collStats: 'foo'}), authzErrorCode);
- assert.commandFailedWithCode(db.runCommand({collStats: 'bar'}), authzErrorCode);
- assert.commandWorked(db.runCommand({collStats: 'baz'}));
- assert.commandWorked(db.runCommand({collStats: 'foobar'}));
- };
-
- var authzErrorCode = 13;
-
- jsTestLog("Setting up replica set");
-
- var name = "rollbackAuth";
- var replTest = new ReplSetTest({name: name, nodes: 3, keyFile: 'jstests/libs/key1'});
- var nodes = replTest.nodeList();
- var conns = replTest.startSet();
- replTest.initiate({
- "_id": "rollbackAuth",
- "members": [
- {"_id": 0, "host": nodes[0], "priority": 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- });
+"use strict";
- // Make sure we have a master
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var master = replTest.getPrimary();
- var a_conn = conns[0];
- var b_conn = conns[1];
- a_conn.setSlaveOk();
- b_conn.setSlaveOk();
- var A = a_conn.getDB("admin");
- var B = b_conn.getDB("admin");
- var a = a_conn.getDB("test");
- var b = b_conn.getDB("test");
- assert.eq(master, conns[0], "conns[0] assumed to be master");
- assert.eq(a_conn, master);
-
- // Make sure we have an arbiter
- assert.soon(function() {
- var res = conns[2].getDB("admin").runCommand({replSetGetStatus: 1});
- return res.myState == 7;
- }, "Arbiter failed to initialize.");
-
- jsTestLog("Creating initial data");
-
- // Create collections that will be used in test
- A.createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
- A.auth('admin', 'pwd');
- a.foo.insert({a: 1});
- a.bar.insert({a: 1});
- a.baz.insert({a: 1});
- a.foobar.insert({a: 1});
-
- // Set up user admin user
- A.createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
- A.auth('userAdmin', 'pwd'); // Logs out of admin@admin user
- B.auth('userAdmin', 'pwd');
-
- // Create a basic user and role
- A.createRole({
- role: 'replStatusRole', // To make awaitReplication() work
- roles: [],
- privileges: [
- {resource: {cluster: true}, actions: ['replSetGetStatus']},
- {resource: {db: 'local', collection: ''}, actions: ['find']},
- {resource: {db: 'local', collection: 'system.replset'}, actions: ['find']}
- ]
- });
- a.createRole({
- role: 'myRole',
- roles: [],
- privileges: [{resource: {db: 'test', collection: ''}, actions: ['dbStats']}]
- });
- a.createUser(
- {user: 'spencer', pwd: 'pwd', roles: ['myRole', {role: 'replStatusRole', db: 'admin'}]});
- assert(a.auth('spencer', 'pwd'));
-
- // wait for secondary to get this data
- assert.soon(function() {
- return b.auth('spencer', 'pwd');
+// Arbiters don't replicate the admin.system.keys collection, so they can never validate or sign
+// clusterTime. Gossiping a clusterTime to an arbiter as a user other than __system will fail,
+// so we skip gossiping for this test.
+//
+// TODO SERVER-32639: remove this flag.
+TestData.skipGossipingClusterTime = true;
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+// helper function for verifying contents at the end of the test
+var checkFinalResults = function(db) {
+ assert.commandWorked(db.runCommand({dbStats: 1}));
+ assert.commandFailedWithCode(db.runCommand({collStats: 'foo'}), authzErrorCode);
+ assert.commandFailedWithCode(db.runCommand({collStats: 'bar'}), authzErrorCode);
+ assert.commandWorked(db.runCommand({collStats: 'baz'}));
+ assert.commandWorked(db.runCommand({collStats: 'foobar'}));
+};
+
+var authzErrorCode = 13;
+
+jsTestLog("Setting up replica set");
+
+var name = "rollbackAuth";
+var replTest = new ReplSetTest({name: name, nodes: 3, keyFile: 'jstests/libs/key1'});
+var nodes = replTest.nodeList();
+var conns = replTest.startSet();
+replTest.initiate({
+ "_id": "rollbackAuth",
+ "members": [
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
+
+// Make sure we have a master
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+var master = replTest.getPrimary();
+var a_conn = conns[0];
+var b_conn = conns[1];
+a_conn.setSlaveOk();
+b_conn.setSlaveOk();
+var A = a_conn.getDB("admin");
+var B = b_conn.getDB("admin");
+var a = a_conn.getDB("test");
+var b = b_conn.getDB("test");
+assert.eq(master, conns[0], "conns[0] assumed to be master");
+assert.eq(a_conn, master);
+
+// Make sure we have an arbiter
+assert.soon(function() {
+ var res = conns[2].getDB("admin").runCommand({replSetGetStatus: 1});
+ return res.myState == 7;
+}, "Arbiter failed to initialize.");
+
+jsTestLog("Creating initial data");
+
+// Create collections that will be used in test
+A.createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
+A.auth('admin', 'pwd');
+a.foo.insert({a: 1});
+a.bar.insert({a: 1});
+a.baz.insert({a: 1});
+a.foobar.insert({a: 1});
+
+// Set up user admin user
+A.createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
+A.auth('userAdmin', 'pwd'); // Logs out of admin@admin user
+B.auth('userAdmin', 'pwd');
+
+// Create a basic user and role
+A.createRole({
+ role: 'replStatusRole', // To make awaitReplication() work
+ roles: [],
+ privileges: [
+ {resource: {cluster: true}, actions: ['replSetGetStatus']},
+ {resource: {db: 'local', collection: ''}, actions: ['find']},
+ {resource: {db: 'local', collection: 'system.replset'}, actions: ['find']}
+ ]
+});
+a.createRole({
+ role: 'myRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['dbStats']}]
+});
+a.createUser(
+ {user: 'spencer', pwd: 'pwd', roles: ['myRole', {role: 'replStatusRole', db: 'admin'}]});
+assert(a.auth('spencer', 'pwd'));
+
+// wait for secondary to get this data
+assert.soon(function() {
+ return b.auth('spencer', 'pwd');
+});
+
+assert.commandWorked(a.runCommand({dbStats: 1}));
+assert.commandFailedWithCode(a.runCommand({collStats: 'foo'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'bar'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'baz'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'foobar'}), authzErrorCode);
+
+assert.commandWorked(b.runCommand({dbStats: 1}));
+assert.commandFailedWithCode(b.runCommand({collStats: 'foo'}), authzErrorCode);
+assert.commandFailedWithCode(b.runCommand({collStats: 'bar'}), authzErrorCode);
+assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode);
+assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode);
+
+jsTestLog("Doing writes that will eventually be rolled back");
+
+// down A and wait for B to become master
+replTest.stop(0);
+assert.soon(function() {
+ try {
+ return B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+}, "B didn't become master");
+printjson(b.adminCommand('replSetGetStatus'));
+
+// Modify the user and role in a way that will be rolled back.
+b.grantPrivilegesToRole('myRole',
+ [{resource: {db: 'test', collection: 'foo'}, actions: ['collStats']}],
+ {}); // Default write concern will wait for majority, which will time out.
+b.createRole({
+ role: 'temporaryRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: 'bar'}, actions: ['collStats']}]
+},
+ {}); // Default write concern will wait for majority, which will time out.
+b.grantRolesToUser('spencer',
+ ['temporaryRole'],
+ {}); // Default write concern will wait for majority, which will time out.
+
+assert.commandWorked(b.runCommand({dbStats: 1}));
+assert.commandWorked(b.runCommand({collStats: 'foo'}));
+assert.commandWorked(b.runCommand({collStats: 'bar'}));
+assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode);
+assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode);
+
+// down B, bring A back up, then wait for A to become master
+// insert new data into A so that B will need to roll back when it reconnects to A
+replTest.stop(1);
+
+replTest.restart(0);
+assert.soon(function() {
+ try {
+ return A.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+}, "A didn't become master");
+
+// A should not have the new data, as it was down
+assert.commandWorked(a.runCommand({dbStats: 1}));
+assert.commandFailedWithCode(a.runCommand({collStats: 'foo'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'bar'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'baz'}), authzErrorCode);
+assert.commandFailedWithCode(a.runCommand({collStats: 'foobar'}), authzErrorCode);
+
+jsTestLog("Doing writes that should persist after the rollback");
+// Modify the user and role in a way that will persist.
+A.auth('userAdmin', 'pwd');
+// Default write concern will wait for majority, which would time out
+// so we override it with an empty write concern
+a.grantPrivilegesToRole(
+ 'myRole', [{resource: {db: 'test', collection: 'baz'}, actions: ['collStats']}], {});
+
+a.createRole({
+ role: 'persistentRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: 'foobar'}, actions: ['collStats']}]
+},
+ {});
+a.grantRolesToUser('spencer', ['persistentRole'], {});
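+// Equivalent explicit form of the empty write concern used above (a hedged sketch,
+// not executed here): the shell helpers take a writeConcern document as their last
+// argument, so any non-majority setting would also avoid blocking on the downed
+// node, e.g.
+//     a.grantRolesToUser('spencer', ['persistentRole'], {w: 1});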
+A.logout();
+a.auth('spencer', 'pwd');
+
+// A has the data we just wrote, but not what B wrote before
+checkFinalResults(a);
+
+jsTestLog("Triggering rollback");
+
+// bring B back in contact with A
+// as A is primary, B will roll back and then catch up
+replTest.restart(1);
+assert.soonNoExcept(function() {
+ authutil.asCluster(replTest.nodes, 'jstests/libs/key1', function() {
+ replTest.awaitReplication();
});
- assert.commandWorked(a.runCommand({dbStats: 1}));
- assert.commandFailedWithCode(a.runCommand({collStats: 'foo'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'bar'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'baz'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'foobar'}), authzErrorCode);
-
- assert.commandWorked(b.runCommand({dbStats: 1}));
- assert.commandFailedWithCode(b.runCommand({collStats: 'foo'}), authzErrorCode);
- assert.commandFailedWithCode(b.runCommand({collStats: 'bar'}), authzErrorCode);
- assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode);
- assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode);
-
- jsTestLog("Doing writes that will eventually be rolled back");
-
- // down A and wait for B to become master
- replTest.stop(0);
- assert.soon(function() {
- try {
- return B.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- }, "B didn't become master");
- printjson(b.adminCommand('replSetGetStatus'));
-
- // Modify the user and role in a way that will be rolled back.
- b.grantPrivilegesToRole(
- 'myRole',
- [{resource: {db: 'test', collection: 'foo'}, actions: ['collStats']}],
- {}); // Default write concern will wait for majority, which will time out.
- b.createRole({
- role: 'temporaryRole',
- roles: [],
- privileges: [{resource: {db: 'test', collection: 'bar'}, actions: ['collStats']}]
- },
- {}); // Default write concern will wait for majority, which will time out.
- b.grantRolesToUser('spencer',
- ['temporaryRole'],
- {}); // Default write concern will wait for majority, which will time out.
-
- assert.commandWorked(b.runCommand({dbStats: 1}));
- assert.commandWorked(b.runCommand({collStats: 'foo'}));
- assert.commandWorked(b.runCommand({collStats: 'bar'}));
- assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode);
- assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode);
-
- // down B, bring A back up, then wait for A to become master
- // insert new data into A so that B will need to rollback when it reconnects to A
- replTest.stop(1);
-
- replTest.restart(0);
- assert.soon(function() {
- try {
- return A.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- }, "A didn't become master");
-
- // A should not have the new data as it was down
- assert.commandWorked(a.runCommand({dbStats: 1}));
- assert.commandFailedWithCode(a.runCommand({collStats: 'foo'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'bar'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'baz'}), authzErrorCode);
- assert.commandFailedWithCode(a.runCommand({collStats: 'foobar'}), authzErrorCode);
-
- jsTestLog("Doing writes that should persist after the rollback");
- // Modify the user and role in a way that will persist.
- A.auth('userAdmin', 'pwd');
- // Default write concern will wait for majority, which would time out
- // so we override it with an empty write concern
- a.grantPrivilegesToRole(
- 'myRole', [{resource: {db: 'test', collection: 'baz'}, actions: ['collStats']}], {});
-
- a.createRole({
- role: 'persistentRole',
- roles: [],
- privileges: [{resource: {db: 'test', collection: 'foobar'}, actions: ['collStats']}]
- },
- {});
- a.grantRolesToUser('spencer', ['persistentRole'], {});
- A.logout();
- a.auth('spencer', 'pwd');
-
- // A has the data we just wrote, but not what B wrote before
- checkFinalResults(a);
-
- jsTestLog("Triggering rollback");
-
- // bring B back in contact with A
- // as A is primary, B will roll back and then catch up
- replTest.restart(1);
- assert.soonNoExcept(function() {
- authutil.asCluster(replTest.nodes, 'jstests/libs/key1', function() {
- replTest.awaitReplication();
- });
-
- return b.auth('spencer', 'pwd');
- });
- // Now both A and B should agree
- checkFinalResults(a);
- checkFinalResults(b);
+ return b.auth('spencer', 'pwd');
+});
+// Now both A and B should agree
+checkFinalResults(a);
+checkFinalResults(b);
- // Verify data consistency between nodes.
- authutil.asCluster(replTest.nodes, 'jstests/libs/key1', function() {
- replTest.checkOplogs();
- });
+// Verify data consistency between nodes.
+authutil.asCluster(replTest.nodes, 'jstests/libs/key1', function() {
+ replTest.checkOplogs();
+});
- // DB hash check is done in stopSet.
- replTest.stopSet();
+// DB hash check is done in stopSet.
+replTest.stopSet();
}());
diff --git a/jstests/replsets/rollback_capped_deletions.js b/jstests/replsets/rollback_capped_deletions.js
index 213efd8b1fd..86928d2601f 100644
--- a/jstests/replsets/rollback_capped_deletions.js
+++ b/jstests/replsets/rollback_capped_deletions.js
@@ -2,47 +2,47 @@
* Tests that capped collections get the correct fastcounts after rollback.
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/replsets/libs/rollback_test.js');
+load('jstests/replsets/libs/rollback_test.js');
- const testName = 'rollback_capped_deletions';
- const dbName = testName;
- const collName = 'cappedCollName';
+const testName = 'rollback_capped_deletions';
+const dbName = testName;
+const collName = 'cappedCollName';
- const rollbackTest = new RollbackTest(testName);
- const primary = rollbackTest.getPrimary();
- const testDb = primary.getDB(dbName);
+const rollbackTest = new RollbackTest(testName);
+const primary = rollbackTest.getPrimary();
+const testDb = primary.getDB(dbName);
- assert.commandWorked(testDb.runCommand({
- 'create': collName,
- 'capped': true,
- 'size': 40,
- }));
- const coll = testDb.getCollection(collName);
- assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(testDb.runCommand({
+ 'create': collName,
+ 'capped': true,
+ 'size': 40,
+}));
+const coll = testDb.getCollection(collName);
+assert.commandWorked(coll.insert({a: 1}));
- rollbackTest.awaitLastOpCommitted();
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+rollbackTest.awaitLastOpCommitted();
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
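+
+// Added commentary (hedged): with snapshotting disabled, the inserts below are not
+// captured in a newer stable checkpoint, so rollback must reconcile the capped
+// collection's fastcount rather than recover it from a checkpoint.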
- assert.commandWorked(coll.insert({bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb: 1}));
- assert.commandWorked(coll.insert({cccccccccccccccccccccccccccccccccccccccccccc: 1}));
- assert.commandWorked(coll.insert({dddddddddddddddddddddddddddddddddddddddddddd: 1}));
- assert.commandWorked(coll.insert({eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee: 1}));
+assert.commandWorked(coll.insert({bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb: 1}));
+assert.commandWorked(coll.insert({cccccccccccccccccccccccccccccccccccccccccccc: 1}));
+assert.commandWorked(coll.insert({dddddddddddddddddddddddddddddddddddddddddddd: 1}));
+assert.commandWorked(coll.insert({eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee: 1}));
- rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToRollbackOperations();
- assert.commandWorked(coll.insert({ffffffffffffffffffffffffffffffffffffffffffff: 1}));
+assert.commandWorked(coll.insert({ffffffffffffffffffffffffffffffffffffffffffff: 1}));
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- try {
- rollbackTest.transitionToSteadyStateOperations();
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
- }
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+try {
+ rollbackTest.transitionToSteadyStateOperations();
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
+}
- rollbackTest.stop();
+rollbackTest.stop();
})(); \ No newline at end of file
diff --git a/jstests/replsets/rollback_collmods.js b/jstests/replsets/rollback_collmods.js
index 6f910ac7a3a..6a741ec6174 100644
--- a/jstests/replsets/rollback_collmods.js
+++ b/jstests/replsets/rollback_collmods.js
@@ -4,105 +4,105 @@
*/
(function() {
- "use strict";
-
- load("jstests/replsets/libs/rollback_test_deluxe.js");
-
- const testName = "rollback_collmods";
- const dbName = testName;
-
- var coll1Name = "NoInitialValidationAtAll";
- var coll2Name = "NoInitialValidationAction";
- var coll3Name = "NoInitialValidator";
- var coll4Name = "NoInitialValidationLevel";
-
- function printCollectionOptionsForNode(node, time) {
- let opts = assert.commandWorked(node.getDB(dbName).runCommand({"listCollections": 1}));
- jsTestLog("Collection options " + time + " on " + node.host + ": " + tojson(opts));
- }
-
- function printCollectionOptions(rollbackTest, time) {
- printCollectionOptionsForNode(rollbackTest.getPrimary(), time);
- rollbackTest.getSecondaries().forEach(node => printCollectionOptionsForNode(node, time));
- }
-
- // Operations that will be present on both nodes, before the common point.
- let CommonOps = (node) => {
- let testDb = node.getDB(dbName);
- assert.writeOK(testDb[coll1Name].insert({a: 1, b: 1}));
- assert.writeOK(testDb[coll2Name].insert({a: 2, b: 2}));
- assert.writeOK(testDb[coll3Name].insert({a: 3, b: 3}));
- assert.writeOK(testDb[coll4Name].insert({a: 4, b: 4}));
-
- // Start with no validation action.
- assert.commandWorked(testDb.runCommand({
- collMod: coll2Name,
- validator: {a: 1},
- validationLevel: "moderate",
- }));
-
- // Start with no validator.
- assert.commandWorked(testDb.runCommand(
- {collMod: coll3Name, validationLevel: "moderate", validationAction: "warn"}));
-
- // Start with no validation level.
- assert.commandWorked(
- testDb.runCommand({collMod: coll4Name, validator: {a: 1}, validationAction: "warn"}));
- };
-
- // Operations that will be performed on the rollback node past the common point.
- let RollbackOps = (node) => {
- let testDb = node.getDB(dbName);
-
- // Set everything on the rollback node.
- assert.commandWorked(testDb.runCommand({
- collMod: coll1Name,
- validator: {a: 1},
- validationLevel: "moderate",
- validationAction: "warn"
- }));
-
- // Only modify the action, and never modify it again so it needs to be reset to empty.
- assert.commandWorked(testDb.runCommand({collMod: coll2Name, validationAction: "error"}));
-
- // Only modify the validator, and never modify it again so it needs to be reset to empty.
- assert.commandWorked(testDb.runCommand({collMod: coll3Name, validator: {b: 1}}));
-
- // Only modify the level, and never modify it again so it needs to be reset to empty.
- assert.commandWorked(testDb.runCommand({
- collMod: coll4Name,
- validationLevel: "moderate",
- }));
- };
-
- // Operations that will be performed on the sync source node after rollback.
- let SteadyStateOps = (node) => {
- let testDb = node.getDB(dbName);
-
- assert.commandWorked(testDb.runCommand({collMod: coll2Name, validator: {b: 1}}));
- assert.commandWorked(testDb.runCommand({collMod: coll3Name, validationAction: "error"}));
- assert.commandWorked(testDb.runCommand({collMod: coll4Name, validationAction: "error"}));
- };
-
- // Set up Rollback Test.
- let rollbackTest = new RollbackTestDeluxe(testName);
- CommonOps(rollbackTest.getPrimary());
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- printCollectionOptions(rollbackTest, "before branch");
- RollbackOps(rollbackNode);
-
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- printCollectionOptions(rollbackTest, "before rollback");
- // No ops on the sync source.
-
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
- printCollectionOptions(rollbackTest, "after rollback");
-
- SteadyStateOps(rollbackTest.getPrimary());
- printCollectionOptions(rollbackTest, "at completion");
-
- rollbackTest.stop();
+"use strict";
+
+load("jstests/replsets/libs/rollback_test_deluxe.js");
+
+const testName = "rollback_collmods";
+const dbName = testName;
+
+var coll1Name = "NoInitialValidationAtAll";
+var coll2Name = "NoInitialValidationAction";
+var coll3Name = "NoInitialValidator";
+var coll4Name = "NoInitialValidationLevel";
+
+function printCollectionOptionsForNode(node, time) {
+ let opts = assert.commandWorked(node.getDB(dbName).runCommand({"listCollections": 1}));
+ jsTestLog("Collection options " + time + " on " + node.host + ": " + tojson(opts));
+}
+
+function printCollectionOptions(rollbackTest, time) {
+ printCollectionOptionsForNode(rollbackTest.getPrimary(), time);
+ rollbackTest.getSecondaries().forEach(node => printCollectionOptionsForNode(node, time));
+}
+
+// Operations that will be present on both nodes, before the common point.
+let CommonOps = (node) => {
+ let testDb = node.getDB(dbName);
+ assert.writeOK(testDb[coll1Name].insert({a: 1, b: 1}));
+ assert.writeOK(testDb[coll2Name].insert({a: 2, b: 2}));
+ assert.writeOK(testDb[coll3Name].insert({a: 3, b: 3}));
+ assert.writeOK(testDb[coll4Name].insert({a: 4, b: 4}));
+
+ // Start with no validation action.
+ assert.commandWorked(testDb.runCommand({
+ collMod: coll2Name,
+ validator: {a: 1},
+ validationLevel: "moderate",
+ }));
+
+ // Start with no validator.
+ assert.commandWorked(testDb.runCommand(
+ {collMod: coll3Name, validationLevel: "moderate", validationAction: "warn"}));
+
+ // Start with no validation level.
+ assert.commandWorked(
+ testDb.runCommand({collMod: coll4Name, validator: {a: 1}, validationAction: "warn"}));
+};
+
+// Operations that will be performed on the rollback node past the common point.
+let RollbackOps = (node) => {
+ let testDb = node.getDB(dbName);
+
+ // Set everything on the rollback node.
+ assert.commandWorked(testDb.runCommand({
+ collMod: coll1Name,
+ validator: {a: 1},
+ validationLevel: "moderate",
+ validationAction: "warn"
+ }));
+
+ // Modify only the action, and never modify it again, so rollback must reset it to empty.
+ assert.commandWorked(testDb.runCommand({collMod: coll2Name, validationAction: "error"}));
+
+ // Modify only the validator, and never modify it again, so rollback must reset it to empty.
+ assert.commandWorked(testDb.runCommand({collMod: coll3Name, validator: {b: 1}}));
+
+ // Modify only the level, and never modify it again, so rollback must reset it to empty.
+ assert.commandWorked(testDb.runCommand({
+ collMod: coll4Name,
+ validationLevel: "moderate",
+ }));
+};
+
+// Operations that will be performed on the sync source node after rollback.
+let SteadyStateOps = (node) => {
+ let testDb = node.getDB(dbName);
+
+ assert.commandWorked(testDb.runCommand({collMod: coll2Name, validator: {b: 1}}));
+ assert.commandWorked(testDb.runCommand({collMod: coll3Name, validationAction: "error"}));
+ assert.commandWorked(testDb.runCommand({collMod: coll4Name, validationAction: "error"}));
+};
+
+// Set up Rollback Test.
+let rollbackTest = new RollbackTestDeluxe(testName);
+CommonOps(rollbackTest.getPrimary());
+
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+printCollectionOptions(rollbackTest, "before branch");
+RollbackOps(rollbackNode);
+
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+printCollectionOptions(rollbackTest, "before rollback");
+// No ops on the sync source.
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+printCollectionOptions(rollbackTest, "after rollback");
+
+SteadyStateOps(rollbackTest.getPrimary());
+printCollectionOptions(rollbackTest, "at completion");
+
+rollbackTest.stop();
})();
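For reference, the collMod round trip that these assertions depend on can be exercised in isolation. A minimal standalone sketch, assuming a plain mongo shell connected to any mongod; the database and collection names here are illustrative, not part of the patch:

(function() {
"use strict";
const demoDb = db.getSiblingDB("collModDemo");
demoDb.c.drop();
assert.commandWorked(demoDb.createCollection("c"));
// Set all three validation options at once, as RollbackOps does for coll1Name.
assert.commandWorked(demoDb.runCommand(
    {collMod: "c", validator: {a: 1}, validationLevel: "moderate", validationAction: "warn"}));
// listCollections reports the options that a rollback must later restore.
const res = assert.commandWorked(demoDb.runCommand({listCollections: 1, filter: {name: "c"}}));
printjson(res.cursor.firstBatch[0].options);
})();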
diff --git a/jstests/replsets/rollback_crud_op_sequences.js b/jstests/replsets/rollback_crud_op_sequences.js
index 19e83b9cc92..ce21957f45e 100644
--- a/jstests/replsets/rollback_crud_op_sequences.js
+++ b/jstests/replsets/rollback_crud_op_sequences.js
@@ -14,130 +14,130 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- // helper function for verifying contents at the end of the test
- var checkFinalResults = function(db) {
- assert.eq(0, db.bar.count({q: 70}));
- assert.eq(2, db.bar.count({q: 40}));
- assert.eq(3, db.bar.count({a: "foo"}));
- assert.eq(6, db.bar.count({q: {$gt: -1}}));
- assert.eq(1, db.bar.count({txt: "foo"}));
- assert.eq(33, db.bar.findOne({q: 0})["y"]);
- assert.eq(1, db.kap.find().itcount());
- assert.eq(0, db.kap2.find().itcount());
- };
+"use strict";
+// helper function for verifying contents at the end of the test
+var checkFinalResults = function(db) {
+ assert.eq(0, db.bar.count({q: 70}));
+ assert.eq(2, db.bar.count({q: 40}));
+ assert.eq(3, db.bar.count({a: "foo"}));
+ assert.eq(6, db.bar.count({q: {$gt: -1}}));
+ assert.eq(1, db.bar.count({txt: "foo"}));
+ assert.eq(33, db.bar.findOne({q: 0})["y"]);
+ assert.eq(1, db.kap.find().itcount());
+ assert.eq(0, db.kap2.find().itcount());
+};
- var name = "rollback_crud_op_sequences";
- var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
- var nodes = replTest.nodeList();
+var name = "rollback_crud_op_sequences";
+var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+var nodes = replTest.nodeList();
- var conns = replTest.startSet();
- replTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], priority: 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- });
+var conns = replTest.startSet();
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
- // Make sure we have a master and that that master is node A
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var master = replTest.getPrimary();
- var a_conn = conns[0];
- a_conn.setSlaveOk();
- var A = a_conn.getDB("admin");
- var b_conn = conns[1];
- b_conn.setSlaveOk();
- var B = b_conn.getDB("admin");
- assert.eq(master, conns[0], "conns[0] assumed to be master");
- assert.eq(a_conn, master);
+// Make sure we have a master and that that master is node A
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+var master = replTest.getPrimary();
+var a_conn = conns[0];
+a_conn.setSlaveOk();
+var A = a_conn.getDB("admin");
+var b_conn = conns[1];
+b_conn.setSlaveOk();
+var B = b_conn.getDB("admin");
+assert.eq(master, conns[0], "conns[0] assumed to be master");
+assert.eq(a_conn, master);
- // Wait for initial replication
- var a = a_conn.getDB("foo");
- var b = b_conn.getDB("foo");
+// Wait for initial replication
+var a = a_conn.getDB("foo");
+var b = b_conn.getDB("foo");
- // initial data for both nodes
- assert.writeOK(a.bar.insert({q: 0}));
- assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
- assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
- assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
- assert.writeOK(a.bar.insert({q: 40, a: 1}));
- assert.writeOK(a.bar.insert({q: 40, a: 2}));
- assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
- a.createCollection("kap", {capped: true, size: 5000});
- assert.writeOK(a.kap.insert({foo: 1}));
- // going back to empty on capped is a special case and must be tested
- a.createCollection("kap2", {capped: true, size: 5501});
- replTest.awaitReplication();
+// initial data for both nodes
+assert.writeOK(a.bar.insert({q: 0}));
+assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
+assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
+assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
+assert.writeOK(a.bar.insert({q: 40, a: 1}));
+assert.writeOK(a.bar.insert({q: 40, a: 2}));
+assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
+a.createCollection("kap", {capped: true, size: 5000});
+assert.writeOK(a.kap.insert({foo: 1}));
+// going back to empty on capped is a special case and must be tested
+a.createCollection("kap2", {capped: true, size: 5501});
+replTest.awaitReplication();
- // isolate A and wait for B to become master
- conns[0].disconnect(conns[1]);
- conns[0].disconnect(conns[2]);
- assert.soon(function() {
- try {
- return B.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- }, "node B did not become master as expected", ReplSetTest.kDefaultTimeoutMS);
+// isolate A and wait for B to become master
+conns[0].disconnect(conns[1]);
+conns[0].disconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+}, "node B did not become master as expected", ReplSetTest.kDefaultTimeoutMS);
- // do operations on B and B alone; these will be rolled back
- assert.writeOK(b.bar.insert({q: 4}));
- assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
- assert.writeOK(b.bar.remove({q: 40})); // multi remove test
- assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
- // rolling back a delete will involve reinserting the item(s)
- assert.writeOK(b.bar.remove({q: 1}));
- assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
- assert.writeOK(b.kap.insert({foo: 2}));
- assert.writeOK(b.kap2.insert({foo: 2}));
- // create a collection (need to roll back the whole thing)
- assert.writeOK(b.newcoll.insert({a: true}));
- // create a new empty collection (need to roll back the whole thing)
- b.createCollection("abc");
+// do operations on B and B alone; these will be rolled back
+assert.writeOK(b.bar.insert({q: 4}));
+assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
+assert.writeOK(b.bar.remove({q: 40})); // multi remove test
+assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
+// rolling back a delete will involve reinserting the item(s)
+assert.writeOK(b.bar.remove({q: 1}));
+assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
+assert.writeOK(b.kap.insert({foo: 2}));
+assert.writeOK(b.kap2.insert({foo: 2}));
+// create a collection (need to roll back the whole thing)
+assert.writeOK(b.newcoll.insert({a: true}));
+// create a new empty collection (need to roll back the whole thing)
+b.createCollection("abc");
- // isolate B, bring A back into contact with the arbiter, then wait for A to become master;
- // insert new data into A so that B will need to roll back when it reconnects to A
- conns[1].disconnect(conns[2]);
- assert.soon(function() {
- try {
- return !B.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- });
+// isolate B, bring A back into contact with the arbiter, then wait for A to become master;
+// insert new data into A so that B will need to roll back when it reconnects to A
+conns[1].disconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return !B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+});
- conns[0].reconnect(conns[2]);
- assert.soon(function() {
- try {
- return A.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- });
- assert.gte(a.bar.find().itcount(), 1, "count check");
- assert.writeOK(a.bar.insert({txt: 'foo'}));
- assert.writeOK(a.bar.remove({q: 70}));
- assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
+conns[0].reconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return A.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+});
+assert.gte(a.bar.find().itcount(), 1, "count check");
+assert.writeOK(a.bar.insert({txt: 'foo'}));
+assert.writeOK(a.bar.remove({q: 70}));
+assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
- // A is 1 2 3 7 8
- // B is 1 2 3 4 5 6
- // put B back in contact with A and the arbiter; since A is primary, B will roll back and then catch up
- conns[1].reconnect(conns[2]);
- conns[0].reconnect(conns[1]);
+// A is 1 2 3 7 8
+// B is 1 2 3 4 5 6
+// put B back in contact with A and the arbiter; since A is primary, B will roll back and then catch up
+conns[1].reconnect(conns[2]);
+conns[0].reconnect(conns[1]);
- awaitOpTime(b_conn, a_conn);
+awaitOpTime(b_conn, a_conn);
- // await steady state and ensure the two nodes have the same contents
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
- checkFinalResults(a);
- checkFinalResults(b);
+// await steady state and ensure the two nodes have the same contents
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+checkFinalResults(a);
+checkFinalResults(b);
- // Verify data consistency between nodes.
- replTest.checkReplicatedDataHashes();
- replTest.checkOplogs();
+// Verify data consistency between nodes.
+replTest.checkReplicatedDataHashes();
+replTest.checkOplogs();
- replTest.stopSet(15);
+replTest.stopSet(15);
}());
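The isolate/reconnect steps above repeat one idiom: poll isMaster() until the node reports (or stops reporting) primacy, swallowing transient connection errors while the bridge is being reconfigured. A hedged sketch of that idiom as a reusable helper (the helper name is ours, not the test's):

function awaitPrimaryState(adminDb, wantPrimary) {
    assert.soon(function() {
        try {
            return adminDb.isMaster().ismaster === wantPrimary;
        } catch (e) {
            // The node may be briefly unreachable while partitions change.
            return false;
        }
    }, "node did not reach the desired primary state", ReplSetTest.kDefaultTimeoutMS);
}
// e.g. awaitPrimaryState(B, true);   // wait for B to become master
//      awaitPrimaryState(B, false);  // wait for B to step down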
diff --git a/jstests/replsets/rollback_ddl_op_sequences.js b/jstests/replsets/rollback_ddl_op_sequences.js
index d074bb33d4e..79883eac336 100644
--- a/jstests/replsets/rollback_ddl_op_sequences.js
+++ b/jstests/replsets/rollback_ddl_op_sequences.js
@@ -14,155 +14,155 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- // helper function for verifying contents at the end of the test
- var checkFinalResults = function(db) {
- assert.eq(2, db.b.getIndexes().length);
- assert.eq(2, db.oldname.getIndexes().length);
- assert.eq(2, db.oldname.find().itcount());
- assert.eq(1, db.kap.find().itcount());
- assert(db.kap.isCapped());
- assert.eq(0, db.bar.count({q: 70}));
- assert.eq(33, db.bar.findOne({q: 0})["y"]);
- assert.eq(0, db.bar.count({q: 70}));
- assert.eq(1, db.bar.count({txt: "foo"}));
- assert.eq(200, db.bar.count({i: {$gt: -1}}));
- assert.eq(6, db.bar.count({q: {$gt: -1}}));
- assert.eq(0, db.getSiblingDB("abc").foo.find().itcount());
- assert.eq(0, db.getSiblingDB("abc").bar.find().itcount());
- };
+"use strict";
+// helper function for verifying contents at the end of the test
+var checkFinalResults = function(db) {
+ assert.eq(2, db.b.getIndexes().length);
+ assert.eq(2, db.oldname.getIndexes().length);
+ assert.eq(2, db.oldname.find().itcount());
+ assert.eq(1, db.kap.find().itcount());
+ assert(db.kap.isCapped());
+ assert.eq(0, db.bar.count({q: 70}));
+ assert.eq(33, db.bar.findOne({q: 0})["y"]);
+ assert.eq(0, db.bar.count({q: 70}));
+ assert.eq(1, db.bar.count({txt: "foo"}));
+ assert.eq(200, db.bar.count({i: {$gt: -1}}));
+ assert.eq(6, db.bar.count({q: {$gt: -1}}));
+ assert.eq(0, db.getSiblingDB("abc").foo.find().itcount());
+ assert.eq(0, db.getSiblingDB("abc").bar.find().itcount());
+};
- var name = "rollback_ddl_op_sequences";
- var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
- var nodes = replTest.nodeList();
+var name = "rollback_ddl_op_sequences";
+var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+var nodes = replTest.nodeList();
- var conns = replTest.startSet();
- replTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], priority: 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- });
+var conns = replTest.startSet();
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
- // Make sure we have a master and that that master is node A
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var master = replTest.getPrimary();
- var a_conn = conns[0];
- a_conn.setSlaveOk();
- var A = a_conn.getDB("admin");
- var b_conn = conns[1];
- b_conn.setSlaveOk();
- var B = b_conn.getDB("admin");
- assert.eq(master, conns[0], "conns[0] assumed to be master");
- assert.eq(a_conn, master);
+// Make sure we have a master and that that master is node A
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+var master = replTest.getPrimary();
+var a_conn = conns[0];
+a_conn.setSlaveOk();
+var A = a_conn.getDB("admin");
+var b_conn = conns[1];
+b_conn.setSlaveOk();
+var B = b_conn.getDB("admin");
+assert.eq(master, conns[0], "conns[0] assumed to be master");
+assert.eq(a_conn, master);
- // Wait for initial replication
- var a = a_conn.getDB("foo");
- var b = b_conn.getDB("foo");
+// Wait for initial replication
+var a = a_conn.getDB("foo");
+var b = b_conn.getDB("foo");
- // initial data for both nodes
- assert.writeOK(a.b.insert({x: 1}));
- a.b.ensureIndex({x: 1});
- assert.writeOK(a.oldname.insert({y: 1}));
- assert.writeOK(a.oldname.insert({y: 2}));
- a.oldname.ensureIndex({y: 1}, true);
- assert.writeOK(a.bar.insert({q: 0}));
- assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
- assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
- assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
- assert.writeOK(a.bar.insert({q: 40333333, a: 1}));
- for (var i = 0; i < 200; i++) {
- assert.writeOK(a.bar.insert({i: i}));
- }
- assert.writeOK(a.bar.insert({q: 40, a: 2}));
- assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
- a.createCollection("kap", {capped: true, size: 5000});
- assert.writeOK(a.kap.insert({foo: 1}));
- replTest.awaitReplication();
+// initial data for both nodes
+assert.writeOK(a.b.insert({x: 1}));
+a.b.ensureIndex({x: 1});
+assert.writeOK(a.oldname.insert({y: 1}));
+assert.writeOK(a.oldname.insert({y: 2}));
+a.oldname.ensureIndex({y: 1}, true);
+assert.writeOK(a.bar.insert({q: 0}));
+assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
+assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
+assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
+assert.writeOK(a.bar.insert({q: 40333333, a: 1}));
+for (var i = 0; i < 200; i++) {
+ assert.writeOK(a.bar.insert({i: i}));
+}
+assert.writeOK(a.bar.insert({q: 40, a: 2}));
+assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
+a.createCollection("kap", {capped: true, size: 5000});
+assert.writeOK(a.kap.insert({foo: 1}));
+replTest.awaitReplication();
- // isolate A and wait for B to become master
- conns[0].disconnect(conns[1]);
- conns[0].disconnect(conns[2]);
- assert.soon(function() {
- try {
- return B.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- });
+// isolate A and wait for B to become master
+conns[0].disconnect(conns[1]);
+conns[0].disconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+});
- // do operations on B and B alone; these will be rolled back
- assert.writeOK(b.bar.insert({q: 4}));
- assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
- assert.writeOK(b.bar.remove({q: 40})); // multi remove test
- assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
- // rolling back a delete will involve reinserting the item(s)
- assert.writeOK(b.bar.remove({q: 1}));
- assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
- assert.writeOK(b.kap.insert({foo: 2}));
- assert.writeOK(b.kap2.insert({foo: 2}));
- // create a collection (need to roll back the whole thing)
- assert.writeOK(b.newcoll.insert({a: true}));
- // create a new empty collection (need to roll back the whole thing)
- b.createCollection("abc");
- // drop a collection - we'll need all its data back!
- b.bar.drop();
- // drop an index - verify it comes back
- b.b.dropIndexes();
- // rename twice to see whether rollback handles chained renames
- b.oldname.renameCollection("newname");
- b.newname.renameCollection("fooname");
- assert(b.fooname.find().itcount() > 0, "count rename");
- // create an index - verify that it is removed
- b.fooname.ensureIndex({q: 1});
- // test rolling back (dropping) a whole database
- var abc = b.getSisterDB("abc");
- assert.writeOK(abc.foo.insert({x: 1}));
- assert.writeOK(abc.bar.insert({y: 999}));
+// do operations on B and B alone; these will be rolled back
+assert.writeOK(b.bar.insert({q: 4}));
+assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
+assert.writeOK(b.bar.remove({q: 40})); // multi remove test
+assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
+// rolling back a delete will involve reinserting the item(s)
+assert.writeOK(b.bar.remove({q: 1}));
+assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
+assert.writeOK(b.kap.insert({foo: 2}));
+assert.writeOK(b.kap2.insert({foo: 2}));
+// create a collection (need to roll back the whole thing)
+assert.writeOK(b.newcoll.insert({a: true}));
+// create a new empty collection (need to roll back the whole thing)
+b.createCollection("abc");
+// drop a collection - we'll need all its data back!
+b.bar.drop();
+// drop an index - verify it comes back
+b.b.dropIndexes();
+// rename twice to see whether rollback handles chained renames
+b.oldname.renameCollection("newname");
+b.newname.renameCollection("fooname");
+assert(b.fooname.find().itcount() > 0, "count rename");
+// create an index - verify that it is removed
+b.fooname.ensureIndex({q: 1});
+// test rolling back (dropping) a whole database
+var abc = b.getSisterDB("abc");
+assert.writeOK(abc.foo.insert({x: 1}));
+assert.writeOK(abc.bar.insert({y: 999}));
- // isolate B, bring A back into contact with the arbiter, then wait for A to become master;
- // insert new data into A so that B will need to roll back when it reconnects to A
- conns[1].disconnect(conns[2]);
- assert.soon(function() {
- try {
- return !B.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- });
+// isolate B, bring A back into contact with the arbiter, then wait for A to become master;
+// insert new data into A so that B will need to roll back when it reconnects to A
+conns[1].disconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return !B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+});
- conns[0].reconnect(conns[2]);
- assert.soon(function() {
- try {
- return A.isMaster().ismaster;
- } catch (e) {
- return false;
- }
- });
- assert(a.bar.find().itcount() >= 1, "count check");
- assert.writeOK(a.bar.insert({txt: 'foo'}));
- assert.writeOK(a.bar.remove({q: 70}));
- assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
+conns[0].reconnect(conns[2]);
+assert.soon(function() {
+ try {
+ return A.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+});
+assert(a.bar.find().itcount() >= 1, "count check");
+assert.writeOK(a.bar.insert({txt: 'foo'}));
+assert.writeOK(a.bar.remove({q: 70}));
+assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
- // A is 1 2 3 7 8
- // B is 1 2 3 4 5 6
- // put B back in contact with A and the arbiter; since A is primary, B will roll back and then catch up
- conns[1].reconnect(conns[2]);
- conns[0].reconnect(conns[1]);
+// A is 1 2 3 7 8
+// B is 1 2 3 4 5 6
+// put B back in contact with A and the arbiter; since A is primary, B will roll back and then catch up
+conns[1].reconnect(conns[2]);
+conns[0].reconnect(conns[1]);
- awaitOpTime(b_conn, a_conn);
+awaitOpTime(b_conn, a_conn);
- // await steady state and ensure the two nodes have the same contents
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
- checkFinalResults(a);
- checkFinalResults(b);
+// await steady state and ensure the two nodes have the same contents
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+checkFinalResults(a);
+checkFinalResults(b);
- // Verify data consistency between nodes.
- replTest.checkReplicatedDataHashes();
- replTest.checkOplogs();
+// Verify data consistency between nodes.
+replTest.checkReplicatedDataHashes();
+replTest.checkOplogs();
- replTest.stopSet(15);
+replTest.stopSet(15);
}());
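checkFinalResults above relies on index counts to confirm that the index dropped on 'b' and the unique index on 'oldname' reappear after rollback. The same check reads more clearly against index names; a small sketch (the helper name is ours, not the test's):

function assertIndexNames(coll, expectedNames) {
    const names = coll.getIndexes().map(spec => spec.name);
    assert.sameMembers(expectedNames, names, "unexpected indexes on " + coll.getFullName());
}
// e.g. assertIndexNames(db.b, ["_id_", "x_1"]);        // the _id index plus {x: 1}
//      assertIndexNames(db.oldname, ["_id_", "y_1"]);  // the unique {y: 1} index restored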
diff --git a/jstests/replsets/rollback_drop_database.js b/jstests/replsets/rollback_drop_database.js
index 70fb8561140..aa783cc9dd3 100644
--- a/jstests/replsets/rollback_drop_database.js
+++ b/jstests/replsets/rollback_drop_database.js
@@ -1,73 +1,72 @@
/*
-* Test that the server is able to roll back a 'dropDatabase' entry correctly. This test creates
-* a collection, then executes a 'dropDatabase' command, partitioning the primary such that the
-* final 'dropDatabase' oplog entry is not replicated. The test then forces rollback of that entry.
-*
-* The 'dropDatabase' command drops each collection, ensures that the last drop is committed,
-* and only then logs a 'dropDatabase' oplog entry. This is therefore the only entry that could
-* get rolled back.
-*/
+ * Test that the server is able to roll back a 'dropDatabase' entry correctly. This test creates
+ * a collection, then executes a 'dropDatabase' command, partitioning the primary such that the
+ * final 'dropDatabase' oplog entry is not replicated. The test then forces rollback of that entry.
+ *
+ * The 'dropDatabase' command drops each collection, ensures that the last drop is committed,
+ * and only then logs a 'dropDatabase' oplog entry. This is therefore the only entry that could
+ * get rolled back.
+ */
(function() {
- load("jstests/replsets/libs/rollback_test.js");
- load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/rollback_test.js");
+load("jstests/libs/check_log.js");
- const testName = "rollback_drop_database";
- const oldDbName = "oldDatabase";
- const newDbName = "newDatabase";
+const testName = "rollback_drop_database";
+const oldDbName = "oldDatabase";
+const newDbName = "newDatabase";
- let rollbackTest = new RollbackTest(testName);
- let rollbackNode = rollbackTest.getPrimary();
- let syncSourceNode = rollbackTest.getSecondary();
+let rollbackTest = new RollbackTest(testName);
+let rollbackNode = rollbackTest.getPrimary();
+let syncSourceNode = rollbackTest.getSecondary();
- // Perform initial insert (common operation).
- assert.writeOK(rollbackNode.getDB(oldDbName)["beforeRollback"].insert({"num": 1}));
+// Perform initial insert (common operation).
+assert.writeOK(rollbackNode.getDB(oldDbName)["beforeRollback"].insert({"num": 1}));
- // Set a failpoint on the original primary, so that it blocks after it commits the last
- // 'dropCollection' entry but before the 'dropDatabase' entry is logged.
- assert.commandWorked(rollbackNode.adminCommand(
- {configureFailPoint: "dropDatabaseHangBeforeLog", mode: "alwaysOn"}));
+// Set a failpoint on the original primary, so that it blocks after it commits the last
+// 'dropCollection' entry but before the 'dropDatabase' entry is logged.
+assert.commandWorked(
+ rollbackNode.adminCommand({configureFailPoint: "dropDatabaseHangBeforeLog", mode: "alwaysOn"}));
- // Issue a 'dropDatabase' command.
- let dropDatabaseFn = function() {
- const rollbackDb = "oldDatabase";
- var primary = db.getMongo();
- jsTestLog("Dropping database " + rollbackDb + " on primary node " + primary.host);
- var dbToDrop = db.getSiblingDB(rollbackDb);
- assert.commandWorked(dbToDrop.dropDatabase({w: 1}));
- };
- let waitForDropDatabaseToFinish = startParallelShell(dropDatabaseFn, rollbackNode.port);
+// Issue a 'dropDatabase' command.
+let dropDatabaseFn = function() {
+ const rollbackDb = "oldDatabase";
+ var primary = db.getMongo();
+ jsTestLog("Dropping database " + rollbackDb + " on primary node " + primary.host);
+ var dbToDrop = db.getSiblingDB(rollbackDb);
+ assert.commandWorked(dbToDrop.dropDatabase({w: 1}));
+};
+let waitForDropDatabaseToFinish = startParallelShell(dropDatabaseFn, rollbackNode.port);
- // Ensure that we've hit the failpoint before moving on.
- checkLog.contains(rollbackNode, "dropDatabase - fail point dropDatabaseHangBeforeLog enabled");
+// Ensure that we've hit the failpoint before moving on.
+checkLog.contains(rollbackNode, "dropDatabase - fail point dropDatabaseHangBeforeLog enabled");
- // Wait for the secondary to finish dropping the collection (the last replicated entry).
- // We use the default 10-minute timeout for this.
- assert.soon(function() {
- let res = syncSourceNode.getDB(oldDbName).getCollectionNames().includes("beforeRollback");
- return !res;
- }, "Sync source did not finish dropping collection beforeRollback", 10 * 60 * 1000);
+// Wait for the secondary to finish dropping the collection (the last replicated entry).
+// We use the default 10-minute timeout for this.
+assert.soon(function() {
+ let res = syncSourceNode.getDB(oldDbName).getCollectionNames().includes("beforeRollback");
+ return !res;
+}, "Sync source did not finish dropping collection beforeRollback", 10 * 60 * 1000);
- rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToRollbackOperations();
- // Allow the final 'dropDatabase' entry to be logged on the now isolated primary.
- // This is the rollback node's divergent oplog entry.
- assert.commandWorked(
- rollbackNode.adminCommand({configureFailPoint: "dropDatabaseHangBeforeLog", mode: "off"}));
- waitForDropDatabaseToFinish();
- assert.eq(false, rollbackNode.getDB(oldDbName).getCollectionNames().includes("beforeRollback"));
- jsTestLog("Database " + oldDbName + " successfully dropped on primary node " +
- rollbackNode.host);
+// Allow the final 'dropDatabase' entry to be logged on the now isolated primary.
+// This is the rollback node's divergent oplog entry.
+assert.commandWorked(
+ rollbackNode.adminCommand({configureFailPoint: "dropDatabaseHangBeforeLog", mode: "off"}));
+waitForDropDatabaseToFinish();
+assert.eq(false, rollbackNode.getDB(oldDbName).getCollectionNames().includes("beforeRollback"));
+jsTestLog("Database " + oldDbName + " successfully dropped on primary node " + rollbackNode.host);
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- // Perform an insert on another database while interfacing with the new primary.
- // This is the sync source's divergent oplog entry.
- assert.writeOK(syncSourceNode.getDB(newDbName)["afterRollback"].insert({"num": 2}));
+// Perform an insert on another database while interfacing with the new primary.
+// This is the sync source's divergent oplog entry.
+assert.writeOK(syncSourceNode.getDB(newDbName)["afterRollback"].insert({"num": 2}));
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- rollbackTest.stop();
+rollbackTest.stop();
})();
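The dropDatabaseHangBeforeLog sequence above is the standard failpoint pattern: enable the point, start the blocking operation in a parallel shell, confirm via the server log that it is actually blocked, then disable the point to release it. In outline, where conn and opThatBlocks stand in for a connection and a shell function of your choosing:

assert.commandWorked(conn.adminCommand(
    {configureFailPoint: "dropDatabaseHangBeforeLog", mode: "alwaysOn"}));
const joinShell = startParallelShell(opThatBlocks, conn.port);
checkLog.contains(conn, "fail point dropDatabaseHangBeforeLog enabled");
assert.commandWorked(conn.adminCommand(
    {configureFailPoint: "dropDatabaseHangBeforeLog", mode: "off"}));
joinShell();  // the parallel shell finishes once the failpoint releases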
diff --git a/jstests/replsets/rollback_drop_index_after_rename.js b/jstests/replsets/rollback_drop_index_after_rename.js
index efeffcdbf68..143701e8e95 100644
--- a/jstests/replsets/rollback_drop_index_after_rename.js
+++ b/jstests/replsets/rollback_drop_index_after_rename.js
@@ -6,53 +6,53 @@
*/
(function() {
- "use strict";
-
- load("jstests/replsets/libs/rollback_test.js");
-
- const testName = "rollback_drop_index_after_rename";
- const dbName = testName;
-
- var fromColl = "fromColl";
- var toColl = "toColl";
- var idxName = "a_1";
-
- // Operations that will be present on both nodes, before the common point.
- let CommonOps = (node) => {
- let testDb = node.getDB(dbName);
- // This creates the collection implicitly and then creates the index.
- assert.commandWorked(testDb.runCommand({
- createIndexes: fromColl,
- indexes: [{
- key: {
- "a": 1,
- },
- name: idxName
- }]
- }));
- };
-
- // Operations that will be performed on the rollback node past the common point.
- let RollbackOps = (node) => {
- let testDb = node.getDB(dbName);
- assert.commandWorked(testDb.adminCommand({
- renameCollection: dbName + "." + fromColl,
- to: dbName + "." + toColl,
- }));
- assert.commandWorked(testDb.runCommand({dropIndexes: toColl, index: idxName}));
- };
-
- // Set up Rollback Test.
- let rollbackTest = new RollbackTest(testName);
- CommonOps(rollbackTest.getPrimary());
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
-
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- rollbackTest.stop();
+"use strict";
+
+load("jstests/replsets/libs/rollback_test.js");
+
+const testName = "rollback_drop_index_after_rename";
+const dbName = testName;
+
+var fromColl = "fromColl";
+var toColl = "toColl";
+var idxName = "a_1";
+
+// Operations that will be present on both nodes, before the common point.
+let CommonOps = (node) => {
+ let testDb = node.getDB(dbName);
+ // This creates the collection implicitly and then creates the index.
+ assert.commandWorked(testDb.runCommand({
+ createIndexes: fromColl,
+ indexes: [{
+ key: {
+ "a": 1,
+ },
+ name: idxName
+ }]
+ }));
+};
+
+// Operations that will be performed on the rollback node past the common point.
+let RollbackOps = (node) => {
+ let testDb = node.getDB(dbName);
+ assert.commandWorked(testDb.adminCommand({
+ renameCollection: dbName + "." + fromColl,
+ to: dbName + "." + toColl,
+ }));
+ assert.commandWorked(testDb.runCommand({dropIndexes: toColl, index: idxName}));
+};
+
+// Set up Rollback Test.
+let rollbackTest = new RollbackTest(testName);
+CommonOps(rollbackTest.getPrimary());
+
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/rollback_dup_ids.js b/jstests/replsets/rollback_dup_ids.js
index a56b2b9bc05..99c329b76d7 100644
--- a/jstests/replsets/rollback_dup_ids.js
+++ b/jstests/replsets/rollback_dup_ids.js
@@ -1,43 +1,42 @@
// When run with --majorityReadConcern=off, this test reproduces the bug described in SERVER-38925,
// where rolling back a delete followed by a restart produces documents with duplicate _id.
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- TestData.rollbackShutdowns = true;
- TestData.allowUncleanShutdowns = true;
- let dbName = "test";
- let sourceCollName = "coll";
+TestData.rollbackShutdowns = true;
+TestData.allowUncleanShutdowns = true;
+let dbName = "test";
+let sourceCollName = "coll";
- let doc1 = {_id: 1, x: "document_of_interest"};
+let doc1 = {_id: 1, x: "document_of_interest"};
- let CommonOps = (node) => {
- // Insert a document that will exist on all nodes.
- assert.commandWorked(node.getDB(dbName)[sourceCollName].insert(doc1));
- };
+let CommonOps = (node) => {
+ // Insert a document that will exist on all nodes.
+ assert.commandWorked(node.getDB(dbName)[sourceCollName].insert(doc1));
+};
- let RollbackOps = (node) => {
- // Delete the document on rollback node so it will be refetched from sync source.
- assert.commandWorked(node.getDB(dbName)[sourceCollName].remove({_id: 1}));
- };
+let RollbackOps = (node) => {
+ // Delete the document on rollback node so it will be refetched from sync source.
+ assert.commandWorked(node.getDB(dbName)[sourceCollName].remove({_id: 1}));
+};
- // Set up Rollback Test.
- let rollbackTest = new RollbackTest();
- CommonOps(rollbackTest.getPrimary());
+// Set up Rollback Test.
+let rollbackTest = new RollbackTest();
+CommonOps(rollbackTest.getPrimary());
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- // Kill and restart the node that rolled back.
- rollbackTest.restartNode(0, 9);
-
- // Check the replica set.
- rollbackTest.stop();
+// Kill and restart the node that rolled back.
+rollbackTest.restartNode(0, 9);
+// Check the replica set.
+rollbackTest.stop();
}());
\ No newline at end of file
diff --git a/jstests/replsets/rollback_files_no_prepare_conflict.js b/jstests/replsets/rollback_files_no_prepare_conflict.js
index 1eb1e7a3a4c..40cc954b068 100644
--- a/jstests/replsets/rollback_files_no_prepare_conflict.js
+++ b/jstests/replsets/rollback_files_no_prepare_conflict.js
@@ -8,49 +8,49 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
- const name = "rollback_files_no_prepare_conflicts";
- const dbName = "test";
- const collName = name;
+const name = "rollback_files_no_prepare_conflicts";
+const dbName = "test";
+const collName = name;
- const rollbackTest = new RollbackTest(name);
+const rollbackTest = new RollbackTest(name);
- let primary = rollbackTest.getPrimary();
- let testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
+let primary = rollbackTest.getPrimary();
+let testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
- jsTestLog("Issue an insert that will be common to both nodes.");
- assert.commandWorked(testColl.insert({_id: 42, a: "one"}));
+jsTestLog("Issue an insert that will be common to both nodes.");
+assert.commandWorked(testColl.insert({_id: 42, a: "one"}));
- rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToRollbackOperations();
- const session = primary.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = primary.startSession();
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- jsTestLog("Make an update to that document outside of a transaction on the rollback node.");
- assert.commandWorked(testColl.update({_id: 42, a: "one"}, {_id: 42, a: "two"}));
+jsTestLog("Make an update to that document outside of a transaction on the rollback node.");
+assert.commandWorked(testColl.update({_id: 42, a: "one"}, {_id: 42, a: "two"}));
- session.startTransaction();
+session.startTransaction();
- jsTestLog("Update the same document on the same node, this time as part of a transaction.");
- assert.commandWorked(sessionColl.update({_id: 42, a: "two"}, {_id: 42, a: "three"}));
+jsTestLog("Update the same document on the same node, this time as part of a transaction.");
+assert.commandWorked(sessionColl.update({_id: 42, a: "two"}, {_id: 42, a: "three"}));
- jsTestLog("Prepare the transaction on the rollback node.");
- PrepareHelpers.prepareTransaction(session, {w: 1});
+jsTestLog("Prepare the transaction on the rollback node.");
+PrepareHelpers.prepareTransaction(session, {w: 1});
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
- jsTestLog("Verify that the document is in the same state as it was at the common point.");
- primary = rollbackTest.getPrimary();
- testDB = primary.getDB(dbName);
- testColl = testDB.getCollection(collName);
- assert.docEq(testColl.findOne({_id: 42}), {_id: 42, a: "one"});
+jsTestLog("Verify that the document is in the same state as it was at the common point.");
+primary = rollbackTest.getPrimary();
+testDB = primary.getDB(dbName);
+testColl = testDB.getCollection(collName);
+assert.docEq(testColl.findOne({_id: 42}), {_id: 42, a: "one"});
- rollbackTest.stop();
+rollbackTest.stop();
})();
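For readers unfamiliar with PrepareHelpers, the prepare/commit cycle driven by these tests reduces to a few calls. A minimal sketch, assuming a connection to a replica-set primary and prepare_helpers.js already loaded; the collection name is illustrative:

const session = primary.startSession();
const sessionColl = session.getDatabase("test").getCollection("c");
session.startTransaction();
assert.commandWorked(sessionColl.insert({_id: 1}));
// prepareTransaction returns the prepare timestamp; the commit timestamp must be >= it.
const prepareTs = PrepareHelpers.prepareTransaction(session);
assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTs));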
diff --git a/jstests/replsets/rollback_prepare_transaction.js b/jstests/replsets/rollback_prepare_transaction.js
index 13bb47d6c6b..f7ffb400878 100644
--- a/jstests/replsets/rollback_prepare_transaction.js
+++ b/jstests/replsets/rollback_prepare_transaction.js
@@ -4,96 +4,96 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
- load("jstests/replsets/libs/rollback_files.js");
-
- const rollbackTest = new RollbackTest();
- const rollbackNode = rollbackTest.getPrimary();
-
- const testDB = rollbackNode.getDB("test");
- const collName = "rollback_prepare_transaction";
- const testColl = testDB.getCollection(collName);
-
- // We perform some operations on the collection aside from starting and preparing a transaction
- // in order to cause the count diff computed by replication to be non-zero.
- assert.commandWorked(testColl.insert({_id: "a"}));
-
- // Start two separate sessions for running transactions. On 'session1', we will run a prepared
- // transaction whose commit operation gets rolled back, and on 'session2', we will run a
- // prepared transaction whose prepare operation gets rolled back.
- const session1 = rollbackNode.startSession();
- const session1DB = session1.getDatabase(testDB.getName());
- const session1Coll = session1DB.getCollection(collName);
-
- const session2 = rollbackNode.startSession();
- const session2DB = session2.getDatabase(testDB.getName());
- const session2Coll = session2DB.getCollection(collName);
-
- // Prepare a transaction whose commit operation will be rolled back.
- session1.startTransaction();
- assert.commandWorked(session1Coll.insert({_id: "t2_a"}));
- assert.commandWorked(session1Coll.insert({_id: "t2_b"}));
- assert.commandWorked(session1Coll.insert({_id: "t2_c"}));
- let prepareTs = PrepareHelpers.prepareTransaction(session1);
-
- rollbackTest.transitionToRollbackOperations();
-
- // The following operations will be rolled-back.
- assert.commandWorked(testColl.insert({_id: "b"}));
-
- session2.startTransaction();
- assert.commandWorked(session2Coll.insert({_id: "t1"}));
-
- // Use w: 1 to simulate a prepare that will not become majority-committed.
- PrepareHelpers.prepareTransaction(session2, {w: 1});
-
- // Commit the transaction that was prepared before the common point.
- PrepareHelpers.commitTransaction(session1, prepareTs);
-
- // This is not exactly correct, but characterizes the current behavior of fastcount, which
- // includes the prepared but uncommitted transaction in the collection count.
- assert.eq(6, testColl.count());
-
- // Check the visible documents.
- assert(arrayEq([{_id: "a"}, {_id: "b"}, {_id: "t2_a"}, {_id: "t2_b"}, {_id: "t2_c"}],
- testColl.find().toArray()));
-
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- // Skip consistency checks so they don't conflict with the prepared transaction.
- rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
-
- // Both the regular insert and prepared insert should be rolled-back.
- assert.sameMembers([{_id: "a"}], testColl.find().toArray());
-
- // Confirm that the rollback wrote deleted documents to a file.
- const replTest = rollbackTest.getTestFixture();
- const expectedDocs = [{_id: "b"}, {_id: "t2_a"}, {_id: "t2_b"}, {_id: "t2_c"}];
- checkRollbackFiles(replTest.getDbPath(rollbackNode), testColl.getFullName(), expectedDocs);
-
- let adminDB = rollbackTest.getPrimary().getDB("admin");
-
- // Since we rolled back the prepared transaction on session2, retrying the prepareTransaction
- // command on this session should fail with a NoSuchTransaction error.
- assert.commandFailedWithCode(adminDB.adminCommand({
- prepareTransaction: 1,
- lsid: session2.getSessionId(),
- txnNumber: session2.getTxnNumber_forTesting(),
- autocommit: false
- }),
- ErrorCodes.NoSuchTransaction);
-
- // Allow the test to complete by aborting the left over prepared transaction.
- jsTestLog("Aborting the prepared transaction on session " + tojson(session1.getSessionId()));
- assert.commandWorked(adminDB.adminCommand({
- abortTransaction: 1,
- lsid: session1.getSessionId(),
- txnNumber: session1.getTxnNumber_forTesting(),
- autocommit: false
- }));
-
- rollbackTest.stop();
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_files.js");
+
+const rollbackTest = new RollbackTest();
+const rollbackNode = rollbackTest.getPrimary();
+
+const testDB = rollbackNode.getDB("test");
+const collName = "rollback_prepare_transaction";
+const testColl = testDB.getCollection(collName);
+
+// We perform some operations on the collection aside from starting and preparing a transaction
+// in order to cause the count diff computed by replication to be non-zero.
+assert.commandWorked(testColl.insert({_id: "a"}));
+
+// Start two separate sessions for running transactions. On 'session1', we will run a prepared
+// transaction whose commit operation gets rolled back, and on 'session2', we will run a
+// prepared transaction whose prepare operation gets rolled back.
+const session1 = rollbackNode.startSession();
+const session1DB = session1.getDatabase(testDB.getName());
+const session1Coll = session1DB.getCollection(collName);
+
+const session2 = rollbackNode.startSession();
+const session2DB = session2.getDatabase(testDB.getName());
+const session2Coll = session2DB.getCollection(collName);
+
+// Prepare a transaction whose commit operation will be rolled back.
+session1.startTransaction();
+assert.commandWorked(session1Coll.insert({_id: "t2_a"}));
+assert.commandWorked(session1Coll.insert({_id: "t2_b"}));
+assert.commandWorked(session1Coll.insert({_id: "t2_c"}));
+let prepareTs = PrepareHelpers.prepareTransaction(session1);
+
+rollbackTest.transitionToRollbackOperations();
+
+// The following operations will be rolled-back.
+assert.commandWorked(testColl.insert({_id: "b"}));
+
+session2.startTransaction();
+assert.commandWorked(session2Coll.insert({_id: "t1"}));
+
+// Use w: 1 to simulate a prepare that will not become majority-committed.
+PrepareHelpers.prepareTransaction(session2, {w: 1});
+
+// Commit the transaction that was prepared before the common point.
+PrepareHelpers.commitTransaction(session1, prepareTs);
+
+// This is not exactly correct, but characterizes the current behavior of fastcount, which
+// includes the prepared but uncommitted transaction in the collection count.
+assert.eq(6, testColl.count());
+
+// Check the visible documents.
+assert(arrayEq([{_id: "a"}, {_id: "b"}, {_id: "t2_a"}, {_id: "t2_b"}, {_id: "t2_c"}],
+ testColl.find().toArray()));
+
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+// Skip consistency checks so they don't conflict with the prepared transaction.
+rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
+
+// Both the regular insert and prepared insert should be rolled-back.
+assert.sameMembers([{_id: "a"}], testColl.find().toArray());
+
+// Confirm that the rollback wrote deleted documents to a file.
+const replTest = rollbackTest.getTestFixture();
+const expectedDocs = [{_id: "b"}, {_id: "t2_a"}, {_id: "t2_b"}, {_id: "t2_c"}];
+checkRollbackFiles(replTest.getDbPath(rollbackNode), testColl.getFullName(), expectedDocs);
+
+let adminDB = rollbackTest.getPrimary().getDB("admin");
+
+// Since we rolled back the prepared transaction on session2, retrying the prepareTransaction
+// command on this session should fail with a NoSuchTransaction error.
+assert.commandFailedWithCode(adminDB.adminCommand({
+ prepareTransaction: 1,
+ lsid: session2.getSessionId(),
+ txnNumber: session2.getTxnNumber_forTesting(),
+ autocommit: false
+}),
+ ErrorCodes.NoSuchTransaction);
+
+// Allow the test to complete by aborting the left over prepared transaction.
+jsTestLog("Aborting the prepared transaction on session " + tojson(session1.getSessionId()));
+assert.commandWorked(adminDB.adminCommand({
+ abortTransaction: 1,
+ lsid: session1.getSessionId(),
+ txnNumber: session1.getTxnNumber_forTesting(),
+ autocommit: false
+}));
+
+rollbackTest.stop();
})();
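The fastcount comments above deserve restating: count() reads the collection's cached document count, which a prepared-but-uncommitted insert has already bumped, while an actual scan cannot see that document until the transaction commits. Schematically, at the point of the assert.eq(6, ...) check in the test:

assert.eq(6, testColl.count());           // fastcount includes the prepared insert {_id: "t1"}
assert.eq(5, testColl.find().itcount());  // a scan returns only visible documents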
diff --git a/jstests/replsets/rollback_reconstructs_transactions_prepared_before_stable.js b/jstests/replsets/rollback_reconstructs_transactions_prepared_before_stable.js
index 22144fecc1e..474d8246f93 100644
--- a/jstests/replsets/rollback_reconstructs_transactions_prepared_before_stable.js
+++ b/jstests/replsets/rollback_reconstructs_transactions_prepared_before_stable.js
@@ -6,107 +6,107 @@
*/
(function() {
- "use strict";
- load("jstests/aggregation/extras/utils.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
-
- const dbName = "test";
- const collName = "rollback_reconstructs_transactions_prepared_before_stable";
-
- const rollbackTest = new RollbackTest(dbName);
- let primary = rollbackTest.getPrimary();
-
- // Create collection we're using beforehand.
- const testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName}));
-
- // Start a session on the primary.
- let session = primary.startSession();
- const sessionID = session.getSessionId();
- let sessionDB = session.getDatabase(dbName);
- let sessionColl = sessionDB.getCollection(collName);
-
- assert.commandWorked(sessionColl.insert({_id: 0}));
-
- // Prepare the transaction on the session.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {a: 1}}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- // Fastcount reflects the insert of a prepared transaction.
- assert.eq(testColl.count(), 2);
-
- jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
- // Doing a majority write after preparing the transaction ensures that the stable timestamp is
- // past the prepare timestamp because this write must be in the committed snapshot.
- assert.commandWorked(
- testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
-
- // Fastcount reflects the insert of a prepared transaction.
- assert.eq(testColl.count(), 3);
-
- // Check that we have one transaction in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
-
- // The transaction should still be prepared after going through rollback.
- rollbackTest.transitionToRollbackOperations();
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
-
- // Make sure there is still one transaction in the transactions table. This is because the
- // entry in the transactions table is made durable when a transaction is prepared.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
-
- // Fastcount reflects the insert of the prepared transaction because it was put back into
- // prepare at the end of rollback.
- assert.eq(testColl.count(), 3);
-
- // Make sure we cannot see the writes from the prepared transaction yet.
- assert(arrayEq(testColl.find().toArray(), [{_id: 0}, {_id: 2}]));
-
- // Get the correct primary after the topology changes.
- primary = rollbackTest.getPrimary();
- rollbackTest.awaitReplication();
-
- // Make sure we can successfully commit the recovered prepared transaction.
- session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
- sessionDB = session.getDatabase(dbName);
- // The transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session.setTxnNumber_forTesting(0);
- const txnNumber = session.getTxnNumber_forTesting();
-
- // Make sure we cannot add any operations to a prepared transaction.
- assert.commandFailedWithCode(sessionDB.runCommand({
- insert: collName,
- txnNumber: NumberLong(txnNumber),
- documents: [{_id: 10}],
- autocommit: false,
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- // Make sure that writing to a document that was updated in the prepared transaction causes
- // a write conflict.
- assert.commandFailedWithCode(
- sessionDB.runCommand(
- {update: collName, updates: [{q: {_id: 0}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- // Commit the transaction.
- assert.commandWorked(sessionDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }));
-
- // Make sure we can see the effects of the prepared transaction.
- assert(arrayEq(testColl.find().toArray(), [{_id: 0, a: 1}, {_id: 1}, {_id: 2}]));
- assert.eq(testColl.count(), 3);
-
- rollbackTest.stop();
+"use strict";
+load("jstests/aggregation/extras/utils.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
+
+const dbName = "test";
+const collName = "rollback_reconstructs_transactions_prepared_before_stable";
+
+const rollbackTest = new RollbackTest(dbName);
+let primary = rollbackTest.getPrimary();
+
+// Create collection we're using beforehand.
+const testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+assert.commandWorked(testDB.runCommand({create: collName}));
+
+// Start a session on the primary.
+let session = primary.startSession();
+const sessionID = session.getSessionId();
+let sessionDB = session.getDatabase(dbName);
+let sessionColl = sessionDB.getCollection(collName);
+
+assert.commandWorked(sessionColl.insert({_id: 0}));
+
+// Prepare the transaction on the session.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
+assert.commandWorked(sessionColl.update({_id: 0}, {$set: {a: 1}}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+// Fastcount reflects the insert of a prepared transaction.
+assert.eq(testColl.count(), 2);
+
+jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
+// Doing a majority write after preparing the transaction ensures that the stable timestamp is
+// past the prepare timestamp because this write must be in the committed snapshot.
+assert.commandWorked(
+ testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
+
+// Fastcount reflects the insert of a prepared transaction.
+assert.eq(testColl.count(), 3);
+
+// Check that we have one transaction in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
+
+// The transaction should still be prepared after going through rollback.
+rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations({skipDataConsistencyChecks: true});
+
+// Make sure there is still one transaction in the transactions table. This is because the
+// entry in the transactions table is made durable when a transaction is prepared.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
+
+// Fastcount reflects the insert of the prepared transaction because it was put back into prepare
+// at the end of rollback.
+assert.eq(testColl.count(), 3);
+
+// Make sure we cannot see the writes from the prepared transaction yet.
+assert(arrayEq(testColl.find().toArray(), [{_id: 0}, {_id: 2}]));
+
+// Get the correct primary after the topology changes.
+primary = rollbackTest.getPrimary();
+rollbackTest.awaitReplication();
+
+// Make sure we can successfully commit the recovered prepared transaction.
+session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
+sessionDB = session.getDatabase(dbName);
+// The transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session.setTxnNumber_forTesting(0);
+const txnNumber = session.getTxnNumber_forTesting();
+
+// Make sure we cannot add any operations to a prepared transaction.
+assert.commandFailedWithCode(sessionDB.runCommand({
+ insert: collName,
+ txnNumber: NumberLong(txnNumber),
+ documents: [{_id: 10}],
+ autocommit: false,
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+// Make sure that writing to a document that was updated in the prepared transaction causes
+// a write conflict.
+assert.commandFailedWithCode(
+ sessionDB.runCommand(
+ {update: collName, updates: [{q: {_id: 0}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+// Commit the transaction.
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}));
+
+// Make sure we can see the effects of the prepared transaction.
+assert(arrayEq(testColl.find().toArray(), [{_id: 0, a: 1}, {_id: 1}, {_id: 2}]));
+assert.eq(testColl.count(), 3);
+
+rollbackTest.stop();
}());
\ No newline at end of file
diff --git a/jstests/replsets/rollback_recovery_commit_transaction_before_stable_timestamp.js b/jstests/replsets/rollback_recovery_commit_transaction_before_stable_timestamp.js
index 712f7a8286a..45c0127dab7 100644
--- a/jstests/replsets/rollback_recovery_commit_transaction_before_stable_timestamp.js
+++ b/jstests/replsets/rollback_recovery_commit_transaction_before_stable_timestamp.js
@@ -10,94 +10,93 @@
*/
(function() {
- "use strict";
-
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
-
- const dbName = "test";
- const collName = "commit_transaction_rollback_recovery_data_already_applied";
-
- const rollbackTest = new RollbackTest(dbName);
- let primary = rollbackTest.getPrimary();
- let testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- // Construct a large array such that two arrays in the same document are not greater than the
- // 16MB limit, but that three such arrays in the same document are greater than 16MB. This will
- // be helpful in recreating an idempotency issue that exists when applying the operations from
- // a transaction after the data already reflects the transaction.
- const largeArray = new Array(7 * 1024 * 1024).join('x');
- assert.commandWorked(testColl.insert([{_id: 1, "a": largeArray}]));
-
- // Start a transaction in a session that will be prepared and committed before rollback.
- let session = primary.startSession();
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"b": largeArray}}));
- assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {"b": 1}}));
- assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"c": largeArray}}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const recoveryTimestamp =
- assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 2}]})).operationTime;
-
- jsTestLog("Holding back the stable timestamp to right after the prepareTimestamp");
-
- // Hold back the stable timestamp to be right after the prepareTimestamp, but before the
- // commitTransaction oplog entry so that the transaction will be replayed during rollback
- // recovery.
- assert.commandWorked(testDB.adminCommand({
- "configureFailPoint": 'holdStableTimestampAtSpecificTimestamp',
- "mode": 'alwaysOn',
- "data": {"timestamp": recoveryTimestamp}
- }));
-
- // Enable fail point "WTSetOldestTSToStableTS" to prevent lag between the stable timestamp and
- // the oldest timestamp during rollback recovery. We avoid this lag to test whether we can
- // prepare and commit a transaction older than the oldest timestamp.
- assert.commandWorked(
- testDB.adminCommand({"configureFailPoint": 'WTSetOldestTSToStableTS', "mode": 'alwaysOn'}));
-
- jsTestLog("Committing the transaction");
-
- // Since this transaction is committed after the last snapshot, this commit oplog entry will be
- // replayed during rollback recovery.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- // During rollback, the data restored via rollback to stable timestamp should not reflect the
- // transaction. If not, replaying the commit oplog entry during rollback recovery would throw
- // BSONTooLarge exception.
- rollbackTest.transitionToRollbackOperations();
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- try {
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- } finally {
- assert.commandWorked(primary.adminCommand(
- {configureFailPoint: 'holdStableTimestampAtSpecificTimestamp', mode: 'off'}));
- }
-
- rollbackTest.transitionToSteadyStateOperations();
-
- primary = rollbackTest.getPrimary();
-
- // Make sure that the data reflects all the operations from the transaction after recovery.
- testDB = primary.getDB(dbName);
- const res = testDB[collName].findOne({_id: 1});
- assert.eq(res, {_id: 1, "a": largeArray, "c": largeArray});
-
- // Make sure that another write to the same document touched by the transaction causes no
- // write conflict. Also, make sure that we can run another transaction after recovery without
- // any problems.
- session = primary.startSession();
- sessionDB = session.getDatabase(dbName);
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
-
- rollbackTest.stop();
-
+"use strict";
+
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
+
+const dbName = "test";
+const collName = "commit_transaction_rollback_recovery_data_already_applied";
+
+const rollbackTest = new RollbackTest(dbName);
+let primary = rollbackTest.getPrimary();
+let testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+// Construct a large array such that two such arrays in the same document do not exceed the
+// 16MB document size limit, but three such arrays in the same document do. This will be
+// helpful in recreating an idempotency issue that exists when applying the operations from
+// a transaction after the data already reflects the transaction.
+const largeArray = new Array(7 * 1024 * 1024).join('x');
+assert.commandWorked(testColl.insert([{_id: 1, "a": largeArray}]));
+
+// Start a transaction in a session that will be prepared and committed before rollback.
+let session = primary.startSession();
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"b": largeArray}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {"b": 1}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"c": largeArray}}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+const recoveryTimestamp =
+ assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 2}]})).operationTime;
+
+jsTestLog("Holding back the stable timestamp to right after the prepareTimestamp");
+
+// Hold back the stable timestamp to be right after the prepareTimestamp, but before the
+// commitTransaction oplog entry so that the transaction will be replayed during rollback
+// recovery.
+assert.commandWorked(testDB.adminCommand({
+ "configureFailPoint": 'holdStableTimestampAtSpecificTimestamp',
+ "mode": 'alwaysOn',
+ "data": {"timestamp": recoveryTimestamp}
+}));
+
+// Enable fail point "WTSetOldestTSToStableTS" to prevent lag between the stable timestamp and
+// the oldest timestamp during rollback recovery. We avoid this lag to test whether we can
+// prepare and commit a transaction older than the oldest timestamp.
+assert.commandWorked(
+ testDB.adminCommand({"configureFailPoint": 'WTSetOldestTSToStableTS', "mode": 'alwaysOn'}));
+
+jsTestLog("Committing the transaction");
+
+// Since this transaction is committed after the last snapshot, this commit oplog entry will be
+// replayed during rollback recovery.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+// During rollback, the data restored via rollback to stable timestamp should not reflect the
+// transaction. If not, replaying the commit oplog entry during rollback recovery would throw
+// BSONTooLarge exception.
+rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+try {
+ rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+} finally {
+ assert.commandWorked(primary.adminCommand(
+ {configureFailPoint: 'holdStableTimestampAtSpecificTimestamp', mode: 'off'}));
+}
+
+rollbackTest.transitionToSteadyStateOperations();
+
+primary = rollbackTest.getPrimary();
+
+// Make sure that the data reflects all the operations from the transaction after recovery.
+testDB = primary.getDB(dbName);
+const res = testDB[collName].findOne({_id: 1});
+assert.eq(res, {_id: 1, "a": largeArray, "c": largeArray});
+
+// Make sure that another write to the same document touched by the transaction causes no
+// write conflict. Also, make sure that we can run another transaction after recovery without
+// any problems.
+session = primary.startSession();
+sessionDB = session.getDatabase(dbName);
+session.startTransaction();
+assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
+
+rollbackTest.stop();
}());
diff --git a/jstests/replsets/rollback_remote_cursor_retry.js b/jstests/replsets/rollback_remote_cursor_retry.js
index 77d8fd5b58a..2e2db8c5df6 100644
--- a/jstests/replsets/rollback_remote_cursor_retry.js
+++ b/jstests/replsets/rollback_remote_cursor_retry.js
@@ -6,46 +6,45 @@
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/rollback_test.js");
- load("jstests/libs/check_log.js");
+"use strict";
+load("jstests/replsets/libs/rollback_test.js");
+load("jstests/libs/check_log.js");
- const testName = "rollback_remote_cursor_retry";
- const dbName = testName;
+const testName = "rollback_remote_cursor_retry";
+const dbName = testName;
- const rollbackTest = new RollbackTest(testName);
+const rollbackTest = new RollbackTest(testName);
- const replSet = rollbackTest.getTestFixture();
+const replSet = rollbackTest.getTestFixture();
- replSet.awaitReplication();
+replSet.awaitReplication();
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- const syncSource = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+const syncSource = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- // This failpoint is used to make sure that we have started rollback before turning on
- // 'failCommand'. Otherwise, we would be failing the 'find' command that we issue against
- // the sync source before we decide to go into rollback.
- assert.commandWorked(rollbackNode.adminCommand(
- {configureFailPoint: "rollbackHangBeforeStart", mode: "alwaysOn"}));
+// This failpoint is used to make sure that we have started rollback before turning on
+// 'failCommand'. Otherwise, we would be failing the 'find' command that we issue against
+// the sync source before we decide to go into rollback.
+assert.commandWorked(
+ rollbackNode.adminCommand({configureFailPoint: "rollbackHangBeforeStart", mode: "alwaysOn"}));
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- // Ensure that we've hit the failpoint before moving on.
- checkLog.contains(rollbackNode, "rollback - rollbackHangBeforeStart fail point enabled");
+// Ensure that we've hit the failpoint before moving on.
+checkLog.contains(rollbackNode, "rollback - rollbackHangBeforeStart fail point enabled");
- // Fail the 'find' command exactly twice.
- jsTestLog("Failing the next two 'find' commands.");
- assert.commandWorked(syncSource.adminCommand({
- configureFailPoint: "failCommand",
- data: {errorCode: 279, failInternalCommands: true, failCommands: ["find"]},
- mode: {times: 2}
- }));
+// Fail the 'find' command exactly twice.
+jsTestLog("Failing the next two 'find' commands.");
+assert.commandWorked(syncSource.adminCommand({
+ configureFailPoint: "failCommand",
+ data: {errorCode: 279, failInternalCommands: true, failCommands: ["find"]},
+ mode: {times: 2}
+}));
- // Let rollback proceed.
- assert.commandWorked(
- rollbackNode.adminCommand({configureFailPoint: "rollbackHangBeforeStart", mode: "off"}));
-
- rollbackTest.transitionToSteadyStateOperations();
- rollbackTest.stop();
+// Let rollback proceed.
+assert.commandWorked(
+ rollbackNode.adminCommand({configureFailPoint: "rollbackHangBeforeStart", mode: "off"}));
+rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/rollback_rename_collection_on_sync_source.js b/jstests/replsets/rollback_rename_collection_on_sync_source.js
index 0f781eb6b8d..be03faa94bf 100644
--- a/jstests/replsets/rollback_rename_collection_on_sync_source.js
+++ b/jstests/replsets/rollback_rename_collection_on_sync_source.js
@@ -5,48 +5,47 @@
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- let dbName = "rollback_rename_collection_on_sync_source";
- let sourceCollName = "sourceColl";
- let destCollName = "destColl";
+let dbName = "rollback_rename_collection_on_sync_source";
+let sourceCollName = "sourceColl";
+let destCollName = "destColl";
- let doc1 = {x: 1};
- let doc2 = {x: 2};
+let doc1 = {x: 1};
+let doc2 = {x: 2};
- let CommonOps = (node) => {
- // Insert a document that will exist on the sync source and rollback node.
- assert.writeOK(node.getDB(dbName)[sourceCollName].insert(doc1));
- };
+let CommonOps = (node) => {
+ // Insert a document that will exist on the sync source and rollback node.
+ assert.writeOK(node.getDB(dbName)[sourceCollName].insert(doc1));
+};
- let RollbackOps = (node) => {
- // Delete the document on rollback node so it will be refetched from sync source.
- assert.writeOK(node.getDB(dbName)[sourceCollName].remove(doc1));
- };
+let RollbackOps = (node) => {
+ // Delete the document on rollback node so it will be refetched from sync source.
+ assert.writeOK(node.getDB(dbName)[sourceCollName].remove(doc1));
+};
- let SyncSourceOps = (node) => {
- // Rename the original collection on the sync source.
- assert.commandWorked(node.getDB(dbName)[sourceCollName].renameCollection(destCollName));
- assert.writeOK(node.getDB(dbName)[destCollName].insert(doc2));
- };
+let SyncSourceOps = (node) => {
+ // Rename the original collection on the sync source.
+ assert.commandWorked(node.getDB(dbName)[sourceCollName].renameCollection(destCollName));
+ assert.writeOK(node.getDB(dbName)[destCollName].insert(doc2));
+};
- // Set up Rollback Test.
- let rollbackTest = new RollbackTest();
- CommonOps(rollbackTest.getPrimary());
+// Set up Rollback Test.
+let rollbackTest = new RollbackTest();
+CommonOps(rollbackTest.getPrimary());
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
- let syncSourceNode = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- SyncSourceOps(syncSourceNode);
+let syncSourceNode = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+SyncSourceOps(syncSourceNode);
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Check the replica set.
- rollbackTest.stop();
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+// Check the replica set.
+rollbackTest.stop();
}()); \ No newline at end of file
diff --git a/jstests/replsets/rollback_rename_count.js b/jstests/replsets/rollback_rename_count.js
index f1376fe7dc5..51fa88f5324 100644
--- a/jstests/replsets/rollback_rename_count.js
+++ b/jstests/replsets/rollback_rename_count.js
@@ -2,58 +2,58 @@
* Tests that rollback corrects fastcounts even when collections are renamed.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- const testName = "rollback_rename_count";
- const dbName = testName;
+const testName = "rollback_rename_count";
+const dbName = testName;
- const rollbackTest = new RollbackTest(testName);
- const primary = rollbackTest.getPrimary();
- const testDb = primary.getDB(dbName);
+const rollbackTest = new RollbackTest(testName);
+const primary = rollbackTest.getPrimary();
+const testDb = primary.getDB(dbName);
- // This collection is non-empty at the stable timestamp.
- const fromCollName1 = "fromCollName1";
- const toCollName1 = "toCollName1";
- let coll1 = testDb.getCollection(fromCollName1);
- assert.commandWorked(coll1.insert({a: 1}));
+// This collection is non-empty at the stable timestamp.
+const fromCollName1 = "fromCollName1";
+const toCollName1 = "toCollName1";
+let coll1 = testDb.getCollection(fromCollName1);
+assert.commandWorked(coll1.insert({a: 1}));
- rollbackTest.awaitLastOpCommitted();
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+rollbackTest.awaitLastOpCommitted();
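+// Hold the stable timestamp here so that the operations below are not yet part of a stable
+// checkpoint.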
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
- assert.commandWorked(coll1.renameCollection(toCollName1));
- coll1 = testDb.getCollection(toCollName1);
- assert.commandWorked(coll1.insert({b: 1}));
+assert.commandWorked(coll1.renameCollection(toCollName1));
+coll1 = testDb.getCollection(toCollName1);
+assert.commandWorked(coll1.insert({b: 1}));
- // This collection is empty at the stable timestamp.
- const fromCollName2 = "fromCollName2";
- const toCollName2 = "toCollName2";
- let coll2 = testDb.getCollection(fromCollName2);
- assert.commandWorked(coll2.insert({c: 1}));
- assert.commandWorked(coll2.renameCollection(toCollName2));
- coll2 = testDb.getCollection(toCollName2);
- assert.commandWorked(coll2.insert({d: 1}));
+// This collection is empty at the stable timestamp.
+const fromCollName2 = "fromCollName2";
+const toCollName2 = "toCollName2";
+let coll2 = testDb.getCollection(fromCollName2);
+assert.commandWorked(coll2.insert({c: 1}));
+assert.commandWorked(coll2.renameCollection(toCollName2));
+coll2 = testDb.getCollection(toCollName2);
+assert.commandWorked(coll2.insert({d: 1}));
- rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToRollbackOperations();
- assert.commandWorked(coll1.insert({e: 1}));
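+// This insert happens after the common point and will be undone by rollback.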
+assert.commandWorked(coll1.insert({e: 1}));
- assert.eq(coll1.find().itcount(), 3);
- assert.eq(coll2.find().itcount(), 2);
+assert.eq(coll1.find().itcount(), 3);
+assert.eq(coll2.find().itcount(), 2);
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- try {
- rollbackTest.transitionToSteadyStateOperations();
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
- }
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+try {
+ rollbackTest.transitionToSteadyStateOperations();
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
+}
- assert.eq(coll1.find().itcount(), 2);
- assert.eq(coll2.find().itcount(), 2);
+assert.eq(coll1.find().itcount(), 2);
+assert.eq(coll2.find().itcount(), 2);
- rollbackTest.stop();
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/rollback_time_limit_param.js b/jstests/replsets/rollback_time_limit_param.js
index 879876dbfcf..345e38f5e89 100644
--- a/jstests/replsets/rollback_time_limit_param.js
+++ b/jstests/replsets/rollback_time_limit_param.js
@@ -7,48 +7,47 @@
(function() {
- "use strict";
+"use strict";
- const testName = "rollback_time_limit_param";
+const testName = "rollback_time_limit_param";
- // Make sure that we reject non-positive values for this parameter when it is set on startup.
- let rstWithBadStartupOptions = new ReplSetTest(
- {name: testName, nodes: 1, nodeOptions: {setParameter: "rollbackTimeLimitSecs=-50"}});
+// Make sure that we reject non-positive values for this parameter when it is set on startup.
+let rstWithBadStartupOptions = new ReplSetTest(
+ {name: testName, nodes: 1, nodeOptions: {setParameter: "rollbackTimeLimitSecs=-50"}});
- assert.throws(function() {
- rstWithBadStartupOptions.startSet();
+assert.throws(function() {
+ rstWithBadStartupOptions.startSet();
+});
- });
+assert(rawMongoProgramOutput().match("Bad value for parameter \"rollbackTimeLimitSecs\""),
+ "failed to reject bad value for parameter");
- assert(rawMongoProgramOutput().match("Bad value for parameter \"rollbackTimeLimitSecs\""),
- "failed to reject bad value for parameter");
+// Now initialize the same parameter correctly on startup.
+let rst = new ReplSetTest(
+ {name: testName, nodes: 1, nodeOptions: {setParameter: "rollbackTimeLimitSecs=1000"}});
+rst.startSet();
+rst.initiate();
- // Now initialize the same parameter correctly on startup.
- let rst = new ReplSetTest(
- {name: testName, nodes: 1, nodeOptions: {setParameter: "rollbackTimeLimitSecs=1000"}});
- rst.startSet();
- rst.initiate();
+let primary = rst.getPrimary();
- let primary = rst.getPrimary();
+// Check that the value of 'rollbackTimeLimitSecs' was initialized correctly on startup.
+let valueSetOnStartup =
+ assert.commandWorked(primary.adminCommand({getParameter: 1, rollbackTimeLimitSecs: 1}))
+ .rollbackTimeLimitSecs;
+assert.eq(NumberLong(1000), valueSetOnStartup);
- // Check that the value of 'rollbackTimeLimitSecs' was initialized correctly on startup.
- let valueSetOnStartup =
- assert.commandWorked(primary.adminCommand({getParameter: 1, rollbackTimeLimitSecs: 1}))
- .rollbackTimeLimitSecs;
- assert.eq(NumberLong(1000), valueSetOnStartup);
+// Check that the value of 'rollbackTimeLimitSecs' was set correctly at runtime.
+assert.commandWorked(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: 2000}));
+let valueSetAtRuntime =
+ assert.commandWorked(primary.adminCommand({getParameter: 1, rollbackTimeLimitSecs: 1}))
+ .rollbackTimeLimitSecs;
+assert.eq(NumberLong(2000), valueSetAtRuntime);
- // Check that the value of 'rollbackTimeLimitSecs' was set correctly at runtime.
- assert.commandWorked(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: 2000}));
- let valueSetAtRuntime =
- assert.commandWorked(primary.adminCommand({getParameter: 1, rollbackTimeLimitSecs: 1}))
- .rollbackTimeLimitSecs;
- assert.eq(NumberLong(2000), valueSetAtRuntime);
+// Make sure that we reject non-positive values for this parameter when it is set at runtime.
+assert.commandFailedWithCode(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: -5}),
+ ErrorCodes.BadValue);
+assert.commandFailedWithCode(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: 0}),
+ ErrorCodes.BadValue);
- // Make sure that we reject non-positive values for this parameter when it is set at runtime.
- assert.commandFailedWithCode(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: -5}),
- ErrorCodes.BadValue);
- assert.commandFailedWithCode(primary.adminCommand({setParameter: 1, rollbackTimeLimitSecs: 0}),
- ErrorCodes.BadValue);
-
- rst.stopSet();
+rst.stopSet();
})(); \ No newline at end of file
diff --git a/jstests/replsets/rollback_transaction_table.js b/jstests/replsets/rollback_transaction_table.js
index e44894a5b4c..3c1a18b436e 100644
--- a/jstests/replsets/rollback_transaction_table.js
+++ b/jstests/replsets/rollback_transaction_table.js
@@ -17,218 +17,216 @@
* - A record for the third session id was created during oplog replay.
*/
(function() {
- "use strict";
-
- // This test drops a collection in the config database, which is not allowed under a session. It
- // also manually simulates a session, which is not compatible with implicit sessions.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/retryable_writes_util.js");
-
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
-
- load("jstests/replsets/rslib.js");
-
- function assertSameRecordOnBothConnections(primary, secondary, lsid) {
- let primaryRecord = primary.getDB("config").transactions.findOne({"_id.id": lsid.id});
- let secondaryRecord = secondary.getDB("config").transactions.findOne({"_id.id": lsid.id});
-
- jsTestLog("Primary record: " + tojson(primaryRecord));
- jsTestLog("Secondary record: " + tojson(secondaryRecord));
-
- assert.eq(bsonWoCompare(primaryRecord, secondaryRecord),
- 0,
- "expected transaction records: " + tojson(primaryRecord) + " and " +
- tojson(secondaryRecord) + " to be the same for lsid: " + tojson(lsid));
- }
-
- function assertRecordHasTxnNumber(conn, lsid, txnNum) {
- let recordTxnNum = conn.getDB("config").transactions.findOne({"_id.id": lsid.id}).txnNum;
- assert.eq(recordTxnNum,
- txnNum,
- "expected node: " + conn + " to have txnNumber: " + txnNum + " for session id: " +
- lsid + " - instead found: " + recordTxnNum);
- }
-
- let testName = "rollback_transaction_table";
- let dbName = "test";
-
- let replTest = new ReplSetTest({
- name: testName,
- nodes: [
- // Primary flops between nodes 0 and 1.
- {},
- {},
- // Arbiter to sway elections.
- {rsConfig: {arbiterOnly: true}}
- ],
- useBridge: true,
- });
- let nodes = replTest.startSet();
- replTest.initiate();
-
- let downstream = nodes[0];
- let upstream = nodes[1];
- let arbiter = nodes[2];
-
- jsTestLog("Making sure 'downstream node' is the primary node.");
- assert.eq(downstream, replTest.getPrimary());
-
- // Renaming or dropping the transactions collection shouldn't crash if the command is not
- // rolled back.
- assert.commandWorked(downstream.getDB("config").transactions.renameCollection("foo"));
- assert.commandWorked(downstream.getDB("config").foo.renameCollection("transactions"));
- assert(downstream.getDB("config").transactions.drop());
- assert.commandWorked(downstream.getDB("config").createCollection("transactions"));
-
- jsTestLog("Running a transaction on the 'downstream node' and waiting for it to replicate.");
- let firstLsid = {id: UUID()};
- let firstCmd = {
- insert: "foo",
- documents: [{_id: 10}, {_id: 30}],
- ordered: false,
- lsid: firstLsid,
- txnNumber: NumberLong(5)
- };
-
- assert.commandWorked(downstream.getDB(dbName).runCommand(firstCmd));
- replTest.awaitReplication();
-
- // Both data bearing nodes should have the same record for the first session id.
- assertSameRecordOnBothConnections(downstream, upstream, firstLsid);
-
- assert.eq(downstream.getDB("config").transactions.find().itcount(), 1);
- assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(5));
-
- assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
- assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
-
- jsTestLog(
- "Creating a partition between 'the downstream and arbiter node' and 'the upstream node.'");
- downstream.disconnect(upstream);
- arbiter.disconnect(upstream);
-
- jsTestLog(
- "Running a higher transaction for the existing session on only the 'downstream node.'");
- let higherTxnFirstCmd = {
- insert: "foo",
- documents: [{_id: 50}],
- ordered: false,
- lsid: firstLsid,
- txnNumber: NumberLong(20)
- };
-
- assert.commandWorked(downstream.getDB(dbName).runCommand(higherTxnFirstCmd));
-
- // Now the data bearing nodes should have different transaction table records for the first
- // session id.
- assert.eq(downstream.getDB("config").transactions.find().itcount(), 1);
- assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
-
- assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
- assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
-
- jsTestLog("Running a transaction for a second session on the 'downstream node.'");
- let secondLsid = {id: UUID()};
- let secondCmd = {
- insert: "foo",
- documents: [{_id: 100}, {_id: 200}],
- ordered: false,
- lsid: secondLsid,
- txnNumber: NumberLong(100)
- };
-
- assert.commandWorked(downstream.getDB(dbName).runCommand(secondCmd));
-
- // Only the downstream node should have two transaction table records, one for the first and
- // second session ids.
- assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
- assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
- assertRecordHasTxnNumber(downstream, secondLsid, NumberLong(100));
-
- assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
- assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
-
- // We do not disconnect the downstream node from the arbiter node at the same time as we
- // disconnect it from the upstream node. This prevents a race where the downstream node could
- // step down from being the primary before the transaction using the second session id finishes.
- jsTestLog(
- "Disconnecting the 'downstream node' from the 'arbiter node' and reconnecting the 'upstream node' to the 'arbiter node.'");
- downstream.disconnect(arbiter);
- upstream.reconnect(arbiter);
-
- jsTestLog("Waiting for the 'upstream node' to become the new primary.");
- waitForState(downstream, ReplSetTest.State.SECONDARY);
- waitForState(upstream, ReplSetTest.State.PRIMARY);
- assert.eq(upstream, replTest.getPrimary());
-
- jsTestLog("Running a new transaction for a third session on the 'upstream node.'");
- let thirdLsid = {id: UUID()};
- let thirdCmd = {
- insert: "foo",
- documents: [{_id: 1000}, {_id: 2000}],
- ordered: false,
- lsid: thirdLsid,
- txnNumber: NumberLong(1)
- };
-
- assert.commandWorked(upstream.getDB(dbName).runCommand(thirdCmd));
-
- // Now the upstream node also has two transaction table records, but for the first and third
- // session ids, not the first and second.
- assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
- assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
- assertRecordHasTxnNumber(downstream, secondLsid, NumberLong(100));
-
- assert.eq(upstream.getDB("config").transactions.find().itcount(), 2);
- assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
- assertRecordHasTxnNumber(upstream, thirdLsid, NumberLong(1));
-
- // Gets the rollback ID of the downstream node before rollback occurs.
- let downstreamRBIDBefore = assert.commandWorked(downstream.adminCommand('replSetGetRBID')).rbid;
-
- jsTestLog("Reconnecting the 'downstream node.'");
- downstream.reconnect(upstream);
- downstream.reconnect(arbiter);
-
- jsTestLog("Waiting for the 'downstream node' to complete rollback.");
- replTest.awaitReplication();
- replTest.awaitSecondaryNodes();
-
- // Ensure that the connection to the downstream node is re-established, since the connection
- // should have been killed during the downstream node's transition to ROLLBACK state.
- reconnect(downstream);
-
- jsTestLog(
- "Checking the rollback ID of the downstream node to confirm that a rollback occurred.");
- assert.neq(downstreamRBIDBefore,
- assert.commandWorked(downstream.adminCommand('replSetGetRBID')).rbid);
-
- // Verify the record for the first lsid rolled back to its original value, the record for the
- // second lsid was removed, and the record for the third lsid was created during oplog replay.
- jsTestLog("Verifying the transaction collection rolled back properly.");
-
- assertSameRecordOnBothConnections(downstream, upstream, firstLsid);
- assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(5));
- assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
-
- assert.isnull(downstream.getDB("config").transactions.findOne({"_id.id": secondLsid.id}));
- assert.isnull(upstream.getDB("config").transactions.findOne({"_id.id": secondLsid.id}));
-
- assertSameRecordOnBothConnections(downstream, upstream, thirdLsid);
- assertRecordHasTxnNumber(downstream, thirdLsid, NumberLong(1));
- assertRecordHasTxnNumber(upstream, thirdLsid, NumberLong(1));
-
- assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
- assert.eq(upstream.getDB("config").transactions.find().itcount(), 2);
-
- // Confirm the nodes are consistent.
- replTest.checkOplogs();
- replTest.checkReplicatedDataHashes(testName);
- replTest.checkCollectionCounts();
-
- replTest.stopSet();
+"use strict";
+
+// This test drops a collection in the config database, which is not allowed under a session. It
+// also manually simulates a session, which is not compatible with implicit sessions.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/retryable_writes_util.js");
+
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
+
+load("jstests/replsets/rslib.js");
+
+function assertSameRecordOnBothConnections(primary, secondary, lsid) {
+ let primaryRecord = primary.getDB("config").transactions.findOne({"_id.id": lsid.id});
+ let secondaryRecord = secondary.getDB("config").transactions.findOne({"_id.id": lsid.id});
+
+ jsTestLog("Primary record: " + tojson(primaryRecord));
+ jsTestLog("Secondary record: " + tojson(secondaryRecord));
+
+ assert.eq(bsonWoCompare(primaryRecord, secondaryRecord),
+ 0,
+ "expected transaction records: " + tojson(primaryRecord) + " and " +
+ tojson(secondaryRecord) + " to be the same for lsid: " + tojson(lsid));
+}
+
+function assertRecordHasTxnNumber(conn, lsid, txnNum) {
+ let recordTxnNum = conn.getDB("config").transactions.findOne({"_id.id": lsid.id}).txnNum;
+ assert.eq(recordTxnNum,
+ txnNum,
+ "expected node: " + conn + " to have txnNumber: " + txnNum +
+ " for session id: " + lsid + " - instead found: " + recordTxnNum);
+}
+
+let testName = "rollback_transaction_table";
+let dbName = "test";
+
+let replTest = new ReplSetTest({
+ name: testName,
+ nodes: [
+ // Primary flops between nodes 0 and 1.
+ {},
+ {},
+ // Arbiter to sway elections.
+ {rsConfig: {arbiterOnly: true}}
+ ],
+ useBridge: true,
+});
+let nodes = replTest.startSet();
+replTest.initiate();
+
+let downstream = nodes[0];
+let upstream = nodes[1];
+let arbiter = nodes[2];
+
+jsTestLog("Making sure 'downstream node' is the primary node.");
+assert.eq(downstream, replTest.getPrimary());
+
+// Renaming or dropping the transactions collection shouldn't crash if the command is not
+// rolled back.
+assert.commandWorked(downstream.getDB("config").transactions.renameCollection("foo"));
+assert.commandWorked(downstream.getDB("config").foo.renameCollection("transactions"));
+assert(downstream.getDB("config").transactions.drop());
+assert.commandWorked(downstream.getDB("config").createCollection("transactions"));
+
+jsTestLog("Running a transaction on the 'downstream node' and waiting for it to replicate.");
+let firstLsid = {id: UUID()};
+let firstCmd = {
+ insert: "foo",
+ documents: [{_id: 10}, {_id: 30}],
+ ordered: false,
+ lsid: firstLsid,
+ txnNumber: NumberLong(5)
+};
+
+assert.commandWorked(downstream.getDB(dbName).runCommand(firstCmd));
+replTest.awaitReplication();
+
+// Both data bearing nodes should have the same record for the first session id.
+assertSameRecordOnBothConnections(downstream, upstream, firstLsid);
+
+assert.eq(downstream.getDB("config").transactions.find().itcount(), 1);
+assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(5));
+
+assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
+assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
+
+jsTestLog(
+ "Creating a partition between 'the downstream and arbiter node' and 'the upstream node.'");
+downstream.disconnect(upstream);
+arbiter.disconnect(upstream);
+
+jsTestLog("Running a higher transaction for the existing session on only the 'downstream node.'");
+let higherTxnFirstCmd = {
+ insert: "foo",
+ documents: [{_id: 50}],
+ ordered: false,
+ lsid: firstLsid,
+ txnNumber: NumberLong(20)
+};
+
+assert.commandWorked(downstream.getDB(dbName).runCommand(higherTxnFirstCmd));
+
+// Now the data bearing nodes should have different transaction table records for the first
+// session id.
+assert.eq(downstream.getDB("config").transactions.find().itcount(), 1);
+assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
+
+assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
+assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
+
+jsTestLog("Running a transaction for a second session on the 'downstream node.'");
+let secondLsid = {id: UUID()};
+let secondCmd = {
+ insert: "foo",
+ documents: [{_id: 100}, {_id: 200}],
+ ordered: false,
+ lsid: secondLsid,
+ txnNumber: NumberLong(100)
+};
+
+assert.commandWorked(downstream.getDB(dbName).runCommand(secondCmd));
+
+// Only the downstream node should have two transaction table records, one for the first and
+// second session ids.
+assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
+assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
+assertRecordHasTxnNumber(downstream, secondLsid, NumberLong(100));
+
+assert.eq(upstream.getDB("config").transactions.find().itcount(), 1);
+assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
+
+// We do not disconnect the downstream node from the arbiter node at the same time as we
+// disconnect it from the upstream node. This prevents a race where the downstream node could
+// step down from being the primary before the transaction using the second session id finishes.
+jsTestLog(
+ "Disconnecting the 'downstream node' from the 'arbiter node' and reconnecting the 'upstream node' to the 'arbiter node.'");
+downstream.disconnect(arbiter);
+upstream.reconnect(arbiter);
+
+jsTestLog("Waiting for the 'upstream node' to become the new primary.");
+waitForState(downstream, ReplSetTest.State.SECONDARY);
+waitForState(upstream, ReplSetTest.State.PRIMARY);
+assert.eq(upstream, replTest.getPrimary());
+
+jsTestLog("Running a new transaction for a third session on the 'upstream node.'");
+let thirdLsid = {id: UUID()};
+let thirdCmd = {
+ insert: "foo",
+ documents: [{_id: 1000}, {_id: 2000}],
+ ordered: false,
+ lsid: thirdLsid,
+ txnNumber: NumberLong(1)
+};
+
+assert.commandWorked(upstream.getDB(dbName).runCommand(thirdCmd));
+
+// Now the upstream node also has two transaction table records, but for the first and third
+// session ids, not the first and second.
+assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
+assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(20));
+assertRecordHasTxnNumber(downstream, secondLsid, NumberLong(100));
+
+assert.eq(upstream.getDB("config").transactions.find().itcount(), 2);
+assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
+assertRecordHasTxnNumber(upstream, thirdLsid, NumberLong(1));
+
+// Gets the rollback ID of the downstream node before rollback occurs.
+let downstreamRBIDBefore = assert.commandWorked(downstream.adminCommand('replSetGetRBID')).rbid;
+
+jsTestLog("Reconnecting the 'downstream node.'");
+downstream.reconnect(upstream);
+downstream.reconnect(arbiter);
+
+jsTestLog("Waiting for the 'downstream node' to complete rollback.");
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+// Ensure that the connection to the downstream node is re-established, since the connection
+// should have been killed during the downstream node's transition to ROLLBACK state.
+reconnect(downstream);
+
+jsTestLog("Checking the rollback ID of the downstream node to confirm that a rollback occurred.");
+assert.neq(downstreamRBIDBefore,
+ assert.commandWorked(downstream.adminCommand('replSetGetRBID')).rbid);
+
+// Verify the record for the first lsid rolled back to its original value, the record for the
+// second lsid was removed, and the record for the third lsid was created during oplog replay.
+jsTestLog("Verifying the transaction collection rolled back properly.");
+
+assertSameRecordOnBothConnections(downstream, upstream, firstLsid);
+assertRecordHasTxnNumber(downstream, firstLsid, NumberLong(5));
+assertRecordHasTxnNumber(upstream, firstLsid, NumberLong(5));
+
+assert.isnull(downstream.getDB("config").transactions.findOne({"_id.id": secondLsid.id}));
+assert.isnull(upstream.getDB("config").transactions.findOne({"_id.id": secondLsid.id}));
+
+assertSameRecordOnBothConnections(downstream, upstream, thirdLsid);
+assertRecordHasTxnNumber(downstream, thirdLsid, NumberLong(1));
+assertRecordHasTxnNumber(upstream, thirdLsid, NumberLong(1));
+
+assert.eq(downstream.getDB("config").transactions.find().itcount(), 2);
+assert.eq(upstream.getDB("config").transactions.find().itcount(), 2);
+
+// Confirm the nodes are consistent.
+replTest.checkOplogs();
+replTest.checkReplicatedDataHashes(testName);
+replTest.checkCollectionCounts();
+
+replTest.stopSet();
}());
diff --git a/jstests/replsets/rollback_transactions_count.js b/jstests/replsets/rollback_transactions_count.js
index f965211284a..1aa7ceeef1c 100644
--- a/jstests/replsets/rollback_transactions_count.js
+++ b/jstests/replsets/rollback_transactions_count.js
@@ -5,62 +5,62 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/rollback_test.js");
+load("jstests/replsets/libs/rollback_test.js");
- const testName = "rollback_transactions_count";
- const dbName = testName;
- const collName = "txnCollName";
+const testName = "rollback_transactions_count";
+const dbName = testName;
+const collName = "txnCollName";
- const rollbackTest = new RollbackTest(testName);
- const primary = rollbackTest.getPrimary();
+const rollbackTest = new RollbackTest(testName);
+const primary = rollbackTest.getPrimary();
- const session1 = primary.startSession();
- const sessionDb1 = session1.getDatabase(dbName);
- const sessionColl1 = sessionDb1[collName];
- assert.commandWorked(sessionColl1.insert({a: 1}));
- session1.startTransaction();
- assert.commandWorked(sessionColl1.insert({b: 1}));
- assert.commandWorked(session1.commitTransaction_forTesting());
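+// Commit a transaction on the first session before the common point; its config.transactions
+// entry should survive the rollback.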
+const session1 = primary.startSession();
+const sessionDb1 = session1.getDatabase(dbName);
+const sessionColl1 = sessionDb1[collName];
+assert.commandWorked(sessionColl1.insert({a: 1}));
+session1.startTransaction();
+assert.commandWorked(sessionColl1.insert({b: 1}));
+assert.commandWorked(session1.commitTransaction_forTesting());
- rollbackTest.awaitLastOpCommitted();
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
+rollbackTest.awaitLastOpCommitted();
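+// Hold the stable timestamp here so that the operations below are not yet part of a stable
+// checkpoint.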
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'}));
- const session2 = primary.startSession();
- const sessionDb2 = session2.getDatabase(dbName);
- const sessionColl2 = sessionDb2[collName];
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({c: 1}));
- assert.commandWorked(session2.commitTransaction_forTesting());
+const session2 = primary.startSession();
+const sessionDb2 = session2.getDatabase(dbName);
+const sessionColl2 = sessionDb2[collName];
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({c: 1}));
+assert.commandWorked(session2.commitTransaction_forTesting());
- rollbackTest.transitionToRollbackOperations();
+rollbackTest.transitionToRollbackOperations();
- session2.startTransaction();
- assert.commandWorked(sessionColl2.insert({d: 1}));
- assert.commandWorked(session2.commitTransaction_forTesting());
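+// This commit happens after the common point; rollback undoes it and restores the session's
+// previous transaction table entry.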
+session2.startTransaction();
+assert.commandWorked(sessionColl2.insert({d: 1}));
+assert.commandWorked(session2.commitTransaction_forTesting());
- const session3 = primary.startSession();
- const sessionDb3 = session3.getDatabase(dbName);
- const sessionColl3 = sessionDb3[collName];
- session3.startTransaction();
- assert.commandWorked(sessionColl3.insert({e: 1}));
- assert.commandWorked(session3.commitTransaction_forTesting());
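+// A transaction on a brand-new session after the common point; rollback removes its
+// config.transactions entry entirely.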
+const session3 = primary.startSession();
+const sessionDb3 = session3.getDatabase(dbName);
+const sessionColl3 = sessionDb3[collName];
+session3.startTransaction();
+assert.commandWorked(sessionColl3.insert({e: 1}));
+assert.commandWorked(session3.commitTransaction_forTesting());
- assert.eq(sessionColl1.find().itcount(), 5);
+assert.eq(sessionColl1.find().itcount(), 5);
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- try {
- rollbackTest.transitionToSteadyStateOperations();
- } finally {
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
- }
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+try {
+ rollbackTest.transitionToSteadyStateOperations();
+} finally {
+ assert.commandWorked(
+ primary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'}));
+}
- assert.eq(sessionColl1.find().itcount(), 3);
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
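+// The two commits after the common point were rolled back, so three documents remain and only
+// the first two sessions still have config.transactions entries.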
+assert.eq(sessionColl1.find().itcount(), 3);
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 2);
- rollbackTest.stop();
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/rollback_unprepared_transactions.js b/jstests/replsets/rollback_unprepared_transactions.js
index fdd286399d6..b2bfaa76405 100644
--- a/jstests/replsets/rollback_unprepared_transactions.js
+++ b/jstests/replsets/rollback_unprepared_transactions.js
@@ -3,58 +3,58 @@
* @tags: [requires_replication, requires_wiredtiger]
*/
(function() {
- 'use strict';
-
- load('jstests/libs/check_log.js');
- load('jstests/replsets/libs/rollback_test.js');
- load('jstests/replsets/libs/rollback_files.js');
-
- // Operations that will be present on both nodes, before the common point.
- const dbName = 'test';
- const collName = 'test.t';
- const collNameShort = 't';
- let CommonOps = (node) => {
- const coll = node.getCollection(collName);
- const mydb = coll.getDB();
- assert.commandWorked(coll.insert({_id: 0}));
- };
-
- // Operations that will be performed on the rollback node past the common point.
- let RollbackOps = (node) => {
- const session = node.startSession();
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collNameShort);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "a"}));
- assert.commandWorked(sessionColl.insert({_id: "b"}));
- assert.commandWorked(sessionColl.insert({_id: "c"}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
- };
-
- // Set up Rollback Test.
- const rollbackTest = new RollbackTest();
-
- CommonOps(rollbackTest.getPrimary());
-
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
-
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Check collection count.
- const primary = rollbackTest.getPrimary();
- const coll = primary.getCollection(collName);
- assert.eq(1, coll.find().itcount());
- assert.eq(1, coll.count());
-
- // Confirm that the rollback wrote deleted documents to a file.
- const replTest = rollbackTest.getTestFixture();
- const expectedDocs = [{_id: "a"}, {_id: "b"}, {_id: "c"}];
- checkRollbackFiles(replTest.getDbPath(rollbackNode), collName, expectedDocs);
-
- rollbackTest.stop();
+'use strict';
+
+load('jstests/libs/check_log.js');
+load('jstests/replsets/libs/rollback_test.js');
+load('jstests/replsets/libs/rollback_files.js');
+
+// Operations that will be present on both nodes, before the common point.
+const dbName = 'test';
+const collName = 'test.t';
+const collNameShort = 't';
+let CommonOps = (node) => {
+ const coll = node.getCollection(collName);
+ const mydb = coll.getDB();
+ assert.commandWorked(coll.insert({_id: 0}));
+};
+
+// Operations that will be performed on the rollback node past the common point.
+let RollbackOps = (node) => {
+ const session = node.startSession();
+ const sessionDB = session.getDatabase(dbName);
+ const sessionColl = sessionDB.getCollection(collNameShort);
+ session.startTransaction();
+ assert.commandWorked(sessionColl.insert({_id: "a"}));
+ assert.commandWorked(sessionColl.insert({_id: "b"}));
+ assert.commandWorked(sessionColl.insert({_id: "c"}));
+ assert.commandWorked(session.commitTransaction_forTesting());
+ session.endSession();
+};
+
+// Set up Rollback Test.
+const rollbackTest = new RollbackTest();
+
+CommonOps(rollbackTest.getPrimary());
+
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+// Check collection count.
+const primary = rollbackTest.getPrimary();
+const coll = primary.getCollection(collName);
+assert.eq(1, coll.find().itcount());
+assert.eq(1, coll.count());
+
+// Confirm that the rollback wrote deleted documents to a file.
+const replTest = rollbackTest.getTestFixture();
+const expectedDocs = [{_id: "a"}, {_id: "b"}, {_id: "c"}];
+checkRollbackFiles(replTest.getDbPath(rollbackNode), collName, expectedDocs);
+
+rollbackTest.stop();
})();
diff --git a/jstests/replsets/rollback_via_refetch_commit_transaction.js b/jstests/replsets/rollback_via_refetch_commit_transaction.js
index 380bcdb4fd2..317fc7b97f8 100644
--- a/jstests/replsets/rollback_via_refetch_commit_transaction.js
+++ b/jstests/replsets/rollback_via_refetch_commit_transaction.js
@@ -10,71 +10,73 @@
TestData.skipCheckDBHashes = true;
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/replsets/libs/rollback_test.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/replsets/libs/rollback_test.js");
- const dbName = "test";
- const collName = "rollback_via_refetch_commit_transaction";
+const dbName = "test";
+const collName = "rollback_via_refetch_commit_transaction";
- // Provide RollbackTest with custom ReplSetTest so we can set forceRollbackViaRefetch.
- const rst = new ReplSetTest({
- name: collName,
- nodes: 3,
- useBridge: true,
- nodeOptions: {setParameter: "forceRollbackViaRefetch=true"}
- });
+// Provide RollbackTest with custom ReplSetTest so we can set forceRollbackViaRefetch.
+const rst = new ReplSetTest({
+ name: collName,
+ nodes: 3,
+ useBridge: true,
+ nodeOptions: {setParameter: "forceRollbackViaRefetch=true"}
+});
- rst.startSet();
- const config = rst.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- rst.initiate(config);
+rst.startSet();
+const config = rst.getReplSetConfig();
+config.members[2].priority = 0;
+config.settings = {
+ chainingAllowed: false
+};
+rst.initiate(config);
- const primaryNode = rst.getPrimary();
+const primaryNode = rst.getPrimary();
- // Create collection that exists on the sync source and rollback node.
- assert.commandWorked(
- primaryNode.getDB(dbName).runCommand({create: collName, writeConcern: {w: 2}}));
+// Create collection that exists on the sync source and rollback node.
+assert.commandWorked(
+ primaryNode.getDB(dbName).runCommand({create: collName, writeConcern: {w: 2}}));
- // Issue a 'prepareTransaction' command just to the current primary.
- const session = primaryNode.getDB(dbName).getMongo().startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({"prepare": "entry"}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+// Issue a 'prepareTransaction' command just to the current primary.
+const session = primaryNode.getDB(dbName).getMongo().startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({"prepare": "entry"}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- const rollbackTest = new RollbackTest(collName, rst);
- // Stop replication from the current primary ("rollbackNode").
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
+const rollbackTest = new RollbackTest(collName, rst);
+// Stop replication from the current primary ("rollbackNode").
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
+PrepareHelpers.commitTransaction(session, prepareTimestamp);
- // Step down the current primary and elect a node that lacks the commit.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+// Step down the current primary and elect a node that lacks the commit.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- // Verify the old primary crashes trying to roll back.
- clearRawMongoProgramOutput();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- jsTestLog("Waiting for crash");
- assert.soon(function() {
- try {
- rollbackNode.getDB("local").runCommand({ping: 1});
- } catch (e) {
- return true;
- }
- return false;
- }, "Node did not fassert", ReplSetTest.kDefaultTimeoutMS);
+// Verify the old primary crashes trying to roll back.
+clearRawMongoProgramOutput();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+jsTestLog("Waiting for crash");
+assert.soon(function() {
+ try {
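+        // The ping throws once the node has fasserted and its connections are closed.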
+ rollbackNode.getDB("local").runCommand({ping: 1});
+ } catch (e) {
+ return true;
+ }
+ return false;
+}, "Node did not fassert", ReplSetTest.kDefaultTimeoutMS);
- // Let the ReplSetTest know the old primary is down.
- rst.stop(rst.getNodeId(rollbackNode), undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
+// Let the ReplSetTest know the old primary is down.
+rst.stop(rst.getNodeId(rollbackNode), undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT});
- const msg = RegExp("Can't roll back this command yet: ");
- assert.soon(function() {
- return rawMongoProgramOutput().match(msg);
- }, "Node did not fail to roll back entry.");
+const msg = RegExp("Can't roll back this command yet: ");
+assert.soon(function() {
+ return rawMongoProgramOutput().match(msg);
+}, "Node did not fail to roll back entry.");
- // Transaction is still in prepared state and validation will be blocked, so skip it.
- rst.stopSet(undefined, undefined, {skipValidation: true});
+// Transaction is still in prepared state and validation will be blocked, so skip it.
+rst.stopSet(undefined, undefined, {skipValidation: true});
}());
diff --git a/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js b/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js
index f4c4b7575ad..7cf47857d2a 100644
--- a/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js
+++ b/jstests/replsets/rollback_via_refetch_survives_nonexistent_collection_drop.js
@@ -10,65 +10,63 @@
*/
(function() {
- "use strict";
- load("jstests/libs/check_log.js");
- load("jstests/replsets/libs/rollback_test.js");
+"use strict";
+load("jstests/libs/check_log.js");
+load("jstests/replsets/libs/rollback_test.js");
- const dbName = "test";
- const collName = "rollback_via_refetch_survives_nonexistent_collection_drop";
+const dbName = "test";
+const collName = "rollback_via_refetch_survives_nonexistent_collection_drop";
- // Provide RollbackTest with custom ReplSetTest so we can set enableMajorityReadConcern.
- const rst = new ReplSetTest({
- name: collName,
- nodes: 3,
- useBridge: true,
- nodeOptions: {enableMajorityReadConcern: "false"}
- });
+// Provide RollbackTest with custom ReplSetTest so we can set enableMajorityReadConcern.
+const rst = new ReplSetTest(
+ {name: collName, nodes: 3, useBridge: true, nodeOptions: {enableMajorityReadConcern: "false"}});
- rst.startSet();
- const config = rst.getReplSetConfig();
- config.members[2].priority = 0;
- config.settings = {chainingAllowed: false};
- rst.initiate(config);
+rst.startSet();
+const config = rst.getReplSetConfig();
+config.members[2].priority = 0;
+config.settings = {
+ chainingAllowed: false
+};
+rst.initiate(config);
- const rollbackTest = new RollbackTest(collName, rst);
+const rollbackTest = new RollbackTest(collName, rst);
- // Stop replication from the current primary, the rollback node.
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- const rollbackDB = rollbackNode.getDB(dbName);
+// Stop replication from the current primary, the rollback node.
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+const rollbackDB = rollbackNode.getDB(dbName);
- jsTestLog("Turning on the rollbackExitEarlyAfterCollectionDrop fail point");
- assert.commandWorked(rollbackDB.adminCommand(
- {configureFailPoint: 'rollbackExitEarlyAfterCollectionDrop', mode: 'alwaysOn'}));
+jsTestLog("Turning on the rollbackExitEarlyAfterCollectionDrop fail point");
+assert.commandWorked(rollbackDB.adminCommand(
+ {configureFailPoint: 'rollbackExitEarlyAfterCollectionDrop', mode: 'alwaysOn'}));
- // Create a collection on the rollback node.
- assert.commandWorked(rollbackDB.runCommand({create: collName}));
+// Create a collection on the rollback node.
+assert.commandWorked(rollbackDB.runCommand({create: collName}));
- // Step down the current primary and elect the node that does not have the collection.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+// Step down the current primary and elect the node that does not have the collection.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- jsTestLog("Attempting to roll back.");
- // Make the old primary roll back against the new primary. This attempt should fail because the
- // rollbackExitEarlyAfterCollectionDrop fail point is set. We fail with a recoverable error
- // so that the rollback will be retried.
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+jsTestLog("Attempting to roll back.");
+// Make the old primary roll back against the new primary. This attempt should fail because the
+// rollbackExitEarlyAfterCollectionDrop fail point is set. We fail with a recoverable error
+// so that the rollback will be retried.
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- // Make sure we exit the rollback early by checking for the correct log messages.
- checkLog.contains(rollbackDB.getMongo(),
- "rollbackExitEarlyAfterCollectionDrop fail point enabled.");
+// Make sure we exit the rollback early by checking for the correct log messages.
+checkLog.contains(rollbackDB.getMongo(),
+ "rollbackExitEarlyAfterCollectionDrop fail point enabled.");
- jsTestLog("Turning off the rollbackExitEarlyAfterCollectionDrop fail point");
- // A rollback attempt after turning off the fail point should succeed even if we already
- // dropped the collection.
- assert.commandWorked(rollbackDB.adminCommand(
- {configureFailPoint: 'rollbackExitEarlyAfterCollectionDrop', mode: 'off'}));
+jsTestLog("Turning off the rollbackExitEarlyAfterCollectionDrop fail point");
+// A rollback attempt after turning off the fail point should succeed even if we already
+// dropped the collection.
+assert.commandWorked(rollbackDB.adminCommand(
+ {configureFailPoint: 'rollbackExitEarlyAfterCollectionDrop', mode: 'off'}));
- rollbackTest.transitionToSteadyStateOperations();
+rollbackTest.transitionToSteadyStateOperations();
- // After a successful rollback attempt, we should have seen the following log message to ensure
- // that we tried to drop a non-existent collection and continued without acquiring a database
- // lock.
- checkLog.contains(rollbackDB.getMongo(), "This collection does not exist");
+// After a successful rollback attempt, we should have seen the following log message to ensure
+// that we tried to drop a non-existent collection and continued without acquiring a database
+// lock.
+checkLog.contains(rollbackDB.getMongo(), "This collection does not exist");
- rollbackTest.stop();
+rollbackTest.stop();
}());
\ No newline at end of file
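
The fail point dance in the test above (enable, run the operations that should trip it, disable) is the standard configureFailPoint lifecycle. A minimal sketch of that pattern as a reusable helper; withFailPoint, conn, and failPointName are hypothetical names for illustration, not part of this patch:

// Hypothetical helper sketching the fail point lifecycle used above.
// 'conn' is assumed to be a live connection to a mongod whose build
// includes the named fail point.
function withFailPoint(conn, failPointName, fn) {
    assert.commandWorked(
        conn.adminCommand({configureFailPoint: failPointName, mode: 'alwaysOn'}));
    try {
        fn();  // Run the operations that should trip the fail point.
    } finally {
        // Always disable the fail point so later operations are unaffected.
        assert.commandWorked(
            conn.adminCommand({configureFailPoint: failPointName, mode: 'off'}));
    }
}
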
diff --git a/jstests/replsets/rollback_views.js b/jstests/replsets/rollback_views.js
index a7c89014de6..a802eb81663 100644
--- a/jstests/replsets/rollback_views.js
+++ b/jstests/replsets/rollback_views.js
@@ -17,122 +17,121 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
-
- // Run a command, return the result if it worked, or assert with a message otherwise.
- let checkedRunCommand = (db, cmd) =>
- ((res, msg) => (assert.commandWorked(res, msg), res))(db.runCommand(cmd), tojson(cmd));
-
- // Like db.getCollectionNames, but allows a filter.
- let getCollectionNames = (db, filter) => checkedRunCommand(db, {listCollections: 1, filter})
- .cursor.firstBatch.map((entry) => entry.name)
- .sort();
-
- // Function that checks that all array elements are equal, and returns the unique element.
- let checkEqual = (array, what) =>
- array.reduce((x, y) => assert.eq(x, y, "nodes don't have matching " + what) || x);
-
- // Helper function for verifying database contents at the end of the test.
- let checkFinalResults = (dbs, expectedColls, expectedViews) => ({
- dbname: checkEqual(dbs, "names"),
- colls: checkEqual(
- dbs.map((db) => getCollectionNames(db, {type: "collection"})).concat([expectedColls]),
- "colls"),
- views: checkEqual(
- dbs.map((db) => getCollectionNames(db, {type: "view"})).concat([expectedViews]),
- "views"),
- md5: checkEqual(dbs.map((db) => checkedRunCommand(db, {dbHash: 1}).md5), "hashes")
- });
-
- let name = "rollback_views.js";
- let replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
- let nodes = replTest.nodeList();
-
- let conns = replTest.startSet();
- replTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], priority: 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}
- ]
- });
-
- // Make sure we have a primary and that that primary is node A.
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
-
- let nodeA = conns[0];
- let nodeB = conns[1];
- let arbiter = conns[2];
-
- let a1 = nodeA.getDB("test1");
- let b1 = nodeB.getDB("test1");
-
- // Initial data for both nodes.
- assert.writeOK(a1.coll.insert([{_id: 1, x: 1}, {_id: 2, x: 2}]));
-
- // Wait for initial replication.
- replTest.awaitReplication();
-
- // Isolate A and wait for B to become primary.
- nodeA.disconnect(nodeB);
- nodeA.disconnect(arbiter);
- assert.soon(() => replTest.getPrimary() == nodeB, "node B did not become primary as expected");
-
-    // Do operations on B and B alone; these will be rolled back.
- // For the collection creation, first create a view with the same name, stressing rollback.
- assert.writeOK(b1.coll.remove({x: 2}));
- assert.commandWorked(b1.createView("x", "coll", [{$match: {x: 1}}]));
- let b2 = b1.getSiblingDB("test2");
- assert.writeOK(b2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
- assert.commandWorked(b2.createView("y", "coll", [{$match: {y: 2}}]));
- let b3 = b1.getSiblingDB("test3");
- assert.commandWorked(b3.createView("z", "coll", []));
- assert.writeOK(b3.system.views.remove({}));
- assert.writeOK(b3.z.insert([{z: 1}, {z: 2}, {z: 3}]));
- assert.writeOK(b3.z.remove({z: 1}));
-
- // Isolate B, bring A back into contact with the arbiter, then wait for A to become primary.
-    // Insert new data into A, so that B will need to roll back when it reconnects to A.
- nodeB.disconnect(arbiter);
- replTest.awaitNoPrimary();
- nodeA.reconnect(arbiter);
- assert.soon(() => replTest.getPrimary() == nodeA, "nodeA did not become primary as expected");
-
- // A is now primary and will perform writes that must be copied by B after rollback.
- assert.eq(a1.coll.find().itcount(), 2, "expected two documents in test1.coll");
- assert.writeOK(a1.x.insert({_id: 3, x: "string in test1.x"}));
- let a2 = a1.getSiblingDB("test2");
- assert.commandWorked(a2.createView("y", "coll", [{$match: {y: 2}}]));
- assert.writeOK(a2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
- let a3 = a1.getSiblingDB("test3");
- assert.writeOK(a3.coll.insert([{z: 1}, {z: 2}, {z: 3}]));
- assert.commandWorked(a3.createView("z", "coll", [{$match: {z: 3}}]));
-
- // A is collections: test1.{coll,x}, test2.{coll,system.views}, test3.{coll,system.views}
- // views: test2.y, test3.z
-    // B is collections: test1.{coll,system.views}, test2.{coll,system.views}, test3.{z,system.views}
- // views: test1.x, test2.y
- //
- // Put B back in contact with A and arbiter. A is primary, so B will rollback and catch up.
- nodeB.reconnect(arbiter);
- nodeA.reconnect(nodeB);
-
- awaitOpTime(nodeB, nodeA);
-
- // Await steady state and ensure the two nodes have the same contents.
- replTest.awaitSecondaryNodes();
- replTest.awaitReplication();
-
- // Check both nodes agree with each other and with the expected set of views and collections.
- print("All done, check that both nodes have the expected collections, views and md5.");
- printjson(checkFinalResults([a1, b1], ["coll", "x"], []));
- printjson(checkFinalResults([a2, b2], ["coll", "system.views"], ["y"]));
- printjson(checkFinalResults([a3, b3], ["coll", "system.views"], ["z"]));
-
- // Verify data consistency between nodes.
- replTest.checkReplicatedDataHashes();
- replTest.checkOplogs();
-
- replTest.stopSet();
+"use strict";
+
+// Run a command, return the result if it worked, or assert with a message otherwise.
+let checkedRunCommand = (db, cmd) =>
+ ((res, msg) => (assert.commandWorked(res, msg), res))(db.runCommand(cmd), tojson(cmd));
+
+// Like db.getCollectionNames, but allows a filter.
+let getCollectionNames = (db, filter) => checkedRunCommand(db, {listCollections: 1, filter})
+ .cursor.firstBatch.map((entry) => entry.name)
+ .sort();
+
+// Function that checks that all array elements are equal, and returns the unique element.
+let checkEqual = (array, what) =>
+ array.reduce((x, y) => assert.eq(x, y, "nodes don't have matching " + what) || x);
+
+// Helper function for verifying database contents at the end of the test.
+let checkFinalResults = (dbs, expectedColls, expectedViews) => ({
+ dbname: checkEqual(dbs, "names"),
+ colls: checkEqual(
+ dbs.map((db) => getCollectionNames(db, {type: "collection"})).concat([expectedColls]),
+ "colls"),
+ views: checkEqual(
+ dbs.map((db) => getCollectionNames(db, {type: "view"})).concat([expectedViews]), "views"),
+ md5: checkEqual(dbs.map((db) => checkedRunCommand(db, {dbHash: 1}).md5), "hashes")
+});
+
+let name = "rollback_views.js";
+let replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
+let nodes = replTest.nodeList();
+
+let conns = replTest.startSet();
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
+
+// Make sure we have a primary and that that primary is node A.
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+
+let nodeA = conns[0];
+let nodeB = conns[1];
+let arbiter = conns[2];
+
+let a1 = nodeA.getDB("test1");
+let b1 = nodeB.getDB("test1");
+
+// Initial data for both nodes.
+assert.writeOK(a1.coll.insert([{_id: 1, x: 1}, {_id: 2, x: 2}]));
+
+// Wait for initial replication.
+replTest.awaitReplication();
+
+// Isolate A and wait for B to become primary.
+nodeA.disconnect(nodeB);
+nodeA.disconnect(arbiter);
+assert.soon(() => replTest.getPrimary() == nodeB, "node B did not become primary as expected");
+
+// Do operations on B and B alone; these will be rolled back.
+// For the collection creation, first create a view with the same name, stressing rollback.
+assert.writeOK(b1.coll.remove({x: 2}));
+assert.commandWorked(b1.createView("x", "coll", [{$match: {x: 1}}]));
+let b2 = b1.getSiblingDB("test2");
+assert.writeOK(b2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
+assert.commandWorked(b2.createView("y", "coll", [{$match: {y: 2}}]));
+let b3 = b1.getSiblingDB("test3");
+assert.commandWorked(b3.createView("z", "coll", []));
+assert.writeOK(b3.system.views.remove({}));
+assert.writeOK(b3.z.insert([{z: 1}, {z: 2}, {z: 3}]));
+assert.writeOK(b3.z.remove({z: 1}));
+
+// Isolate B, bring A back into contact with the arbiter, then wait for A to become primary.
+// Insert new data into A, so that B will need to roll back when it reconnects to A.
+nodeB.disconnect(arbiter);
+replTest.awaitNoPrimary();
+nodeA.reconnect(arbiter);
+assert.soon(() => replTest.getPrimary() == nodeA, "nodeA did not become primary as expected");
+
+// A is now primary and will perform writes that must be copied by B after rollback.
+assert.eq(a1.coll.find().itcount(), 2, "expected two documents in test1.coll");
+assert.writeOK(a1.x.insert({_id: 3, x: "string in test1.x"}));
+let a2 = a1.getSiblingDB("test2");
+assert.commandWorked(a2.createView("y", "coll", [{$match: {y: 2}}]));
+assert.writeOK(a2.coll.insert([{_id: 1, y: 1}, {_id: 2, y: 2}]));
+let a3 = a1.getSiblingDB("test3");
+assert.writeOK(a3.coll.insert([{z: 1}, {z: 2}, {z: 3}]));
+assert.commandWorked(a3.createView("z", "coll", [{$match: {z: 3}}]));
+
+// A is collections: test1.{coll,x}, test2.{coll,system.views}, test3.{coll,system.views}
+// views: test2.y, test3.z
+// B is collections: test1.{coll,system.views}, test2.{coll,system.views}, test3.{z,system.views}
+// views: test1.x, test2.y
+//
+// Put B back in contact with A and arbiter. A is primary, so B will rollback and catch up.
+nodeB.reconnect(arbiter);
+nodeA.reconnect(nodeB);
+
+awaitOpTime(nodeB, nodeA);
+
+// Await steady state and ensure the two nodes have the same contents.
+replTest.awaitSecondaryNodes();
+replTest.awaitReplication();
+
+// Check both nodes agree with each other and with the expected set of views and collections.
+print("All done, check that both nodes have the expected collections, views and md5.");
+printjson(checkFinalResults([a1, b1], ["coll", "x"], []));
+printjson(checkFinalResults([a2, b2], ["coll", "system.views"], ["y"]));
+printjson(checkFinalResults([a3, b3], ["coll", "system.views"], ["z"]));
+
+// Verify data consistency between nodes.
+replTest.checkReplicatedDataHashes();
+replTest.checkOplogs();
+
+replTest.stopSet();
}());
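
The getCollectionNames helper above relies on listCollections accepting a filter document, which cleanly separates collections from views when verifying post-rollback state. A usage sketch of that pattern; it assumes only the shell global 'db' and that the first batch is large enough to hold all names:

// Sketch of the filtered listCollections pattern from rollback_views.js.
const testDB = db.getSiblingDB("test1");
const colls = testDB.runCommand({listCollections: 1, filter: {type: "collection"}})
                  .cursor.firstBatch.map((entry) => entry.name)
                  .sort();
const views = testDB.runCommand({listCollections: 1, filter: {type: "view"}})
                  .cursor.firstBatch.map((entry) => entry.name)
                  .sort();
print("collections: " + tojson(colls) + "; views: " + tojson(views));
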
diff --git a/jstests/replsets/rollback_waits_for_bgindex_completion.js b/jstests/replsets/rollback_waits_for_bgindex_completion.js
index 32e99124587..e6433d558e6 100644
--- a/jstests/replsets/rollback_waits_for_bgindex_completion.js
+++ b/jstests/replsets/rollback_waits_for_bgindex_completion.js
@@ -5,87 +5,87 @@
* @tags: [requires_wiredtiger, requires_journaling, requires_majority_read_concern]
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/check_log.js');
- load("jstests/replsets/rslib.js");
- load('jstests/replsets/libs/rollback_test.js');
+load('jstests/libs/check_log.js');
+load("jstests/replsets/rslib.js");
+load('jstests/replsets/libs/rollback_test.js');
+const dbName = "dbWithBgIndex";
+const collName = 'coll';
+let bgIndexThread;
+
+function hangIndexBuildsFailpoint(node, fpMode) {
+ assert.commandWorked(node.adminCommand(
+ {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: fpMode}));
+}
+
+/**
+ * A function to create a background index on the test collection in a parallel shell.
+ */
+function createBgIndexFn() {
+ // Re-define constants, since they are not shared between shells.
const dbName = "dbWithBgIndex";
- const collName = 'coll';
- let bgIndexThread;
-
- function hangIndexBuildsFailpoint(node, fpMode) {
- assert.commandWorked(node.adminCommand(
- {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: fpMode}));
- }
-
- /**
- * A function to create a background index on the test collection in a parallel shell.
- */
- function createBgIndexFn() {
- // Re-define constants, since they are not shared between shells.
- const dbName = "dbWithBgIndex";
- const collName = "coll";
- let testDB = db.getSiblingDB(dbName);
- jsTestLog("Starting background index build from parallel shell.");
- assert.commandWorked(testDB[collName].createIndex({x: 1}, {background: true}));
- }
-
- /**
- * Operations that will get replicated to both replica set nodes before rollback.
- *
- * These common operations are run against the node that will eventually go into rollback, so
- * the failpoints will only be enabled on the rollback node.
- */
- function CommonOps(node) {
- // Create a collection on both data bearing nodes, so we can create an index on it.
- const testDB = node.getDB(dbName);
- assert.commandWorked(testDB.createCollection(collName));
-
- // Hang background index builds.
- hangIndexBuildsFailpoint(node, "alwaysOn");
-
- jsTestLog("Starting background index build parallel shell.");
- bgIndexThread = startParallelShell(createBgIndexFn, node.port);
-
- // Make sure the index build started and hit the failpoint.
- jsTestLog("Waiting for background index build to start and hang due to failpoint.");
- checkLog.contains(node, "index build: starting on " + testDB[collName].getFullName());
- checkLog.contains(node, "Hanging index build with no locks");
- }
-
- const rollbackTest = new RollbackTest();
- const originalPrimary = rollbackTest.getPrimary();
- CommonOps(originalPrimary);
-
- // Insert a document so that there is an operation to rollback.
- const rollbackNode = rollbackTest.transitionToRollbackOperations();
- assert.writeOK(rollbackNode.getDB(dbName)["rollbackColl"].insert({x: 1}));
-
- // Allow rollback to start. There are no sync source ops.
- rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
-
- // Make sure that rollback is hung waiting for the background index operation to complete.
- jsTestLog("Waiting for rollback to block on the background index build completion.");
- let msg1 = "Waiting for all background operations to complete before starting rollback";
- let msg2 = "Waiting for 1 background operations to complete on database '" + dbName + "'";
- checkLog.contains(rollbackNode, msg1);
- checkLog.contains(rollbackNode, msg2);
-
- // Now turn off the index build failpoint, allowing rollback to continue and finish.
- jsTestLog(
- "Disabling 'hangAfterStartingIndexBuildUnlocked' failpoint on the rollback node so background index build can complete.");
- hangIndexBuildsFailpoint(rollbackNode, "off");
-
- // Make sure the background index build completed before rollback started.
- checkLog.contains(rollbackNode,
- "Finished waiting for background operations to complete before rollback");
-
- // Wait for rollback to finish.
- rollbackTest.transitionToSteadyStateOperations();
-
- // Check the replica set.
- rollbackTest.stop();
+ const collName = "coll";
+ let testDB = db.getSiblingDB(dbName);
+ jsTestLog("Starting background index build from parallel shell.");
+ assert.commandWorked(testDB[collName].createIndex({x: 1}, {background: true}));
+}
+
+/**
+ * Operations that will get replicated to both replica set nodes before rollback.
+ *
+ * These common operations are run against the node that will eventually go into rollback, so
+ * the failpoints will only be enabled on the rollback node.
+ */
+function CommonOps(node) {
+ // Create a collection on both data bearing nodes, so we can create an index on it.
+ const testDB = node.getDB(dbName);
+ assert.commandWorked(testDB.createCollection(collName));
+
+ // Hang background index builds.
+ hangIndexBuildsFailpoint(node, "alwaysOn");
+
+ jsTestLog("Starting background index build parallel shell.");
+ bgIndexThread = startParallelShell(createBgIndexFn, node.port);
+
+ // Make sure the index build started and hit the failpoint.
+ jsTestLog("Waiting for background index build to start and hang due to failpoint.");
+ checkLog.contains(node, "index build: starting on " + testDB[collName].getFullName());
+ checkLog.contains(node, "Hanging index build with no locks");
+}
+
+const rollbackTest = new RollbackTest();
+const originalPrimary = rollbackTest.getPrimary();
+CommonOps(originalPrimary);
+
+// Insert a document so that there is an operation to rollback.
+const rollbackNode = rollbackTest.transitionToRollbackOperations();
+assert.writeOK(rollbackNode.getDB(dbName)["rollbackColl"].insert({x: 1}));
+
+// Allow rollback to start. There are no sync source ops.
+rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+
+// Make sure that rollback is hung waiting for the background index operation to complete.
+jsTestLog("Waiting for rollback to block on the background index build completion.");
+let msg1 = "Waiting for all background operations to complete before starting rollback";
+let msg2 = "Waiting for 1 background operations to complete on database '" + dbName + "'";
+checkLog.contains(rollbackNode, msg1);
+checkLog.contains(rollbackNode, msg2);
+
+// Now turn off the index build failpoint, allowing rollback to continue and finish.
+jsTestLog(
+ "Disabling 'hangAfterStartingIndexBuildUnlocked' failpoint on the rollback node so background index build can complete.");
+hangIndexBuildsFailpoint(rollbackNode, "off");
+
+// Make sure the background index build completed before rollback started.
+checkLog.contains(rollbackNode,
+ "Finished waiting for background operations to complete before rollback");
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSteadyStateOperations();
+
+// Check the replica set.
+rollbackTest.stop();
}());
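
The test above coordinates three pieces: a fail point that parks the index build, a parallel shell that runs the build, and checkLog to detect the park. A condensed sketch of that choreography, assuming a connection 'node' to a running mongod, check_log.js loaded, and a server build that has this fail point:

// Park background index builds on this node.
assert.commandWorked(node.adminCommand(
    {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: 'alwaysOn'}));
const awaitShell = startParallelShell(function() {
    // This body runs in a separate shell process with its own global 'db'.
    assert.commandWorked(db.getSiblingDB("dbWithBgIndex").coll.createIndex({x: 1}));
}, node.port);
// Block until the index build has started and parked on the fail point.
checkLog.contains(node, "Hanging index build with no locks");
assert.commandWorked(node.adminCommand(
    {configureFailPoint: 'hangAfterStartingIndexBuildUnlocked', mode: 'off'}));
awaitShell();  // Join the parallel shell once the build can complete.
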
diff --git a/jstests/replsets/rollback_with_socket_error_then_steady_state.js b/jstests/replsets/rollback_with_socket_error_then_steady_state.js
index 29f060757d1..713658e1b5f 100644
--- a/jstests/replsets/rollback_with_socket_error_then_steady_state.js
+++ b/jstests/replsets/rollback_with_socket_error_then_steady_state.js
@@ -4,134 +4,133 @@
// node would be "stuck" with state=ROLLBACK while it was doing steady-state replication, with no
// way to reach SECONDARY without restarting the process.
(function() {
- 'use strict';
-
- load("jstests/libs/check_log.js");
- load("jstests/replsets/rslib.js");
-
- var collName = "test.coll";
- var counter = 0;
-
- var rst = new ReplSetTest({
- name: 'rollback_with_socket_error_then_steady_state',
- nodes: [
- // Primary flops between nodes 0 and 1.
- {},
- {},
- // Node 2 is the node under test.
- {rsConfig: {priority: 0}},
- // Arbiters to sway elections.
- {rsConfig: {arbiterOnly: true}},
- {rsConfig: {arbiterOnly: true}}
- ],
- useBridge: true
- });
- var nodes = rst.startSet();
- rst.initiate();
-
- function stepUp(rst, node) {
- var primary = rst.getPrimary();
- if (primary != node) {
- assert.commandWorked(primary.adminCommand({replSetStepDown: 1, force: true}));
- }
- waitForState(node, ReplSetTest.State.PRIMARY);
+'use strict';
+
+load("jstests/libs/check_log.js");
+load("jstests/replsets/rslib.js");
+
+var collName = "test.coll";
+var counter = 0;
+
+var rst = new ReplSetTest({
+ name: 'rollback_with_socket_error_then_steady_state',
+ nodes: [
+ // Primary flops between nodes 0 and 1.
+ {},
+ {},
+ // Node 2 is the node under test.
+ {rsConfig: {priority: 0}},
+ // Arbiters to sway elections.
+ {rsConfig: {arbiterOnly: true}},
+ {rsConfig: {arbiterOnly: true}}
+ ],
+ useBridge: true
+});
+var nodes = rst.startSet();
+rst.initiate();
+
+function stepUp(rst, node) {
+ var primary = rst.getPrimary();
+ if (primary != node) {
+ assert.commandWorked(primary.adminCommand({replSetStepDown: 1, force: true}));
}
-
- jsTestLog("Make sure node 0 is primary.");
- stepUp(rst, nodes[0]);
- assert.eq(nodes[0], rst.getPrimary());
- // Wait for all data bearing nodes to get up to date.
- assert.writeOK(nodes[0].getCollection(collName).insert(
- {a: counter++}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
-
- jsTestLog("Create two partitions: [1] and [0,2,3,4].");
- nodes[1].disconnect(nodes[0]);
- nodes[1].disconnect(nodes[2]);
- nodes[1].disconnect(nodes[3]);
- nodes[1].disconnect(nodes[4]);
-
- jsTestLog("Do a write that is replicated to [0,2,3,4].");
- assert.writeOK(nodes[0].getCollection(collName).insert(
- {a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
-
- jsTestLog("Repartition to: [0,2] and [1,3,4].");
- nodes[1].reconnect(nodes[3]);
- nodes[1].reconnect(nodes[4]);
- nodes[3].disconnect(nodes[0]);
- nodes[3].disconnect(nodes[2]);
- nodes[4].disconnect(nodes[0]);
- nodes[4].disconnect(nodes[2]);
-
- jsTestLog("Ensure that 0 steps down and that 1 becomes primary.");
- waitForState(nodes[0], ReplSetTest.State.SECONDARY);
- waitForState(nodes[1], ReplSetTest.State.PRIMARY);
- assert.eq(nodes[1], rst.getPrimary());
-
- jsTestLog("Do a write to node 1 on the [1,3,4] side of the partition.");
- assert.writeOK(nodes[1].getCollection(collName).insert({a: counter++}));
-
- // Turn on failpoint on node 2 to pause rollback before doing anything.
- assert.commandWorked(
- nodes[2].adminCommand({configureFailPoint: 'rollbackHangBeforeStart', mode: 'alwaysOn'}));
-
- jsTestLog("Repartition to: [0] and [1,2,3,4].");
- nodes[2].disconnect(nodes[0]);
- nodes[2].reconnect(nodes[1]);
- nodes[2].reconnect(nodes[3]);
- nodes[2].reconnect(nodes[4]);
-
- jsTestLog("Wait for node 2 to decide to go into ROLLBACK and start syncing from node 1.");
- // Since nodes 1 and 2 have now diverged, node 2 should go into rollback. The failpoint will
-    // stop it from actually transitioning to rollback, so the checkLog below will ensure that we
-    // have decided to roll back, but haven't actually started yet.
- rst.awaitSyncSource(nodes[2], nodes[1]);
-
- jsTestLog("Wait for failpoint on node 2 to pause rollback before it starts");
- // Wait for fail point message to be logged.
- checkLog.contains(nodes[2], 'rollback - rollbackHangBeforeStart fail point enabled');
-
- jsTestLog("Repartition to: [1] and [0,2,3,4].");
- nodes[1].disconnect(nodes[3]);
- nodes[1].disconnect(nodes[4]);
- nodes[2].disconnect(nodes[1]);
- nodes[2].reconnect(nodes[0]);
- nodes[3].reconnect(nodes[0]);
- nodes[3].reconnect(nodes[2]);
- nodes[4].reconnect(nodes[0]);
- nodes[4].reconnect(nodes[2]);
-
- // Turn off failpoint on node 2 to allow rollback against node 1 to fail with a network error.
- assert.adminCommandWorkedAllowingNetworkError(
- nodes[2], {configureFailPoint: 'rollbackHangBeforeStart', mode: 'off'});
-
- // Make node 0 ahead of node 2 again so node 2 will pick it as a sync source.
-
- jsTestLog("waiting for node 0 to be primary");
- waitForState(nodes[1], ReplSetTest.State.SECONDARY);
- waitForState(nodes[0], ReplSetTest.State.PRIMARY);
- assert.eq(nodes[0], rst.getPrimary());
-
- jsTestLog("w:2 write to node 0 (replicated to node 2)");
- assert.writeOK(nodes[0].getCollection(collName).insert(
- {a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
-
- // At this point node 2 has failed rollback before making any durable changes, including writing
- // to minValid. That means that it is free to pick any sync source and will pick node 0 where it
- // can pick up where it left off without rolling back. Ensure that it is able to reach SECONDARY
- // and doesn't do steady-state replication in ROLLBACK state.
- jsTestLog("Wait for node 2 to go into SECONDARY");
- assert.neq(nodes[2].adminCommand('replSetGetStatus').myState,
- ReplSetTest.State.ROLLBACK,
- "node 2 is doing steady-state replication with state=ROLLBACK!");
- waitForState(nodes[2], ReplSetTest.State.SECONDARY);
-
- // Re-connect all nodes and await secondary nodes so we can check data consistency.
- nodes[1].reconnect([nodes[0], nodes[2], nodes[3], nodes[4]]);
- rst.awaitSecondaryNodes();
-
- // Verify data consistency between nodes.
- rst.checkReplicatedDataHashes();
- rst.checkOplogs();
- rst.stopSet();
-
+ waitForState(node, ReplSetTest.State.PRIMARY);
+}
+
+jsTestLog("Make sure node 0 is primary.");
+stepUp(rst, nodes[0]);
+assert.eq(nodes[0], rst.getPrimary());
+// Wait for all data bearing nodes to get up to date.
+assert.writeOK(nodes[0].getCollection(collName).insert(
+ {a: counter++}, {writeConcern: {w: 3, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+
+jsTestLog("Create two partitions: [1] and [0,2,3,4].");
+nodes[1].disconnect(nodes[0]);
+nodes[1].disconnect(nodes[2]);
+nodes[1].disconnect(nodes[3]);
+nodes[1].disconnect(nodes[4]);
+
+jsTestLog("Do a write that is replicated to [0,2,3,4].");
+assert.writeOK(nodes[0].getCollection(collName).insert(
+ {a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+
+jsTestLog("Repartition to: [0,2] and [1,3,4].");
+nodes[1].reconnect(nodes[3]);
+nodes[1].reconnect(nodes[4]);
+nodes[3].disconnect(nodes[0]);
+nodes[3].disconnect(nodes[2]);
+nodes[4].disconnect(nodes[0]);
+nodes[4].disconnect(nodes[2]);
+
+jsTestLog("Ensure that 0 steps down and that 1 becomes primary.");
+waitForState(nodes[0], ReplSetTest.State.SECONDARY);
+waitForState(nodes[1], ReplSetTest.State.PRIMARY);
+assert.eq(nodes[1], rst.getPrimary());
+
+jsTestLog("Do a write to node 1 on the [1,3,4] side of the partition.");
+assert.writeOK(nodes[1].getCollection(collName).insert({a: counter++}));
+
+// Turn on failpoint on node 2 to pause rollback before doing anything.
+assert.commandWorked(
+ nodes[2].adminCommand({configureFailPoint: 'rollbackHangBeforeStart', mode: 'alwaysOn'}));
+
+jsTestLog("Repartition to: [0] and [1,2,3,4].");
+nodes[2].disconnect(nodes[0]);
+nodes[2].reconnect(nodes[1]);
+nodes[2].reconnect(nodes[3]);
+nodes[2].reconnect(nodes[4]);
+
+jsTestLog("Wait for node 2 to decide to go into ROLLBACK and start syncing from node 1.");
+// Since nodes 1 and 2 have now diverged, node 2 should go into rollback. The failpoint will
+// stop it from actually transitioning to rollback, so the checkLog below will ensure that we
+// have decided to roll back, but haven't actually started yet.
+rst.awaitSyncSource(nodes[2], nodes[1]);
+
+jsTestLog("Wait for failpoint on node 2 to pause rollback before it starts");
+// Wait for fail point message to be logged.
+checkLog.contains(nodes[2], 'rollback - rollbackHangBeforeStart fail point enabled');
+
+jsTestLog("Repartition to: [1] and [0,2,3,4].");
+nodes[1].disconnect(nodes[3]);
+nodes[1].disconnect(nodes[4]);
+nodes[2].disconnect(nodes[1]);
+nodes[2].reconnect(nodes[0]);
+nodes[3].reconnect(nodes[0]);
+nodes[3].reconnect(nodes[2]);
+nodes[4].reconnect(nodes[0]);
+nodes[4].reconnect(nodes[2]);
+
+// Turn off failpoint on node 2 to allow rollback against node 1 to fail with a network error.
+assert.adminCommandWorkedAllowingNetworkError(
+ nodes[2], {configureFailPoint: 'rollbackHangBeforeStart', mode: 'off'});
+
+// Make node 0 ahead of node 2 again so node 2 will pick it as a sync source.
+
+jsTestLog("waiting for node 0 to be primary");
+waitForState(nodes[1], ReplSetTest.State.SECONDARY);
+waitForState(nodes[0], ReplSetTest.State.PRIMARY);
+assert.eq(nodes[0], rst.getPrimary());
+
+jsTestLog("w:2 write to node 0 (replicated to node 2)");
+assert.writeOK(nodes[0].getCollection(collName).insert(
+ {a: counter++}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+
+// At this point node 2 has failed rollback before making any durable changes, including writing
+// to minValid. That means that it is free to pick any sync source and will pick node 0 where it
+// can pick up where it left off without rolling back. Ensure that it is able to reach SECONDARY
+// and doesn't do steady-state replication in ROLLBACK state.
+jsTestLog("Wait for node 2 to go into SECONDARY");
+assert.neq(nodes[2].adminCommand('replSetGetStatus').myState,
+ ReplSetTest.State.ROLLBACK,
+ "node 2 is doing steady-state replication with state=ROLLBACK!");
+waitForState(nodes[2], ReplSetTest.State.SECONDARY);
+
+// Re-connect all nodes and await secondary nodes so we can check data consistency.
+nodes[1].reconnect([nodes[0], nodes[2], nodes[3], nodes[4]]);
+rst.awaitSecondaryNodes();
+
+// Verify data consistency between nodes.
+rst.checkReplicatedDataHashes();
+rst.checkOplogs();
+rst.stopSet();
}());
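
All of the disconnect/reconnect calls above work because the set was started with useBridge: true, so each connection is backed by a bridge whose links can be cut per peer. Hypothetical partition/heal helpers (illustration only, not part of this patch) expressing the same choreography:

// Assumes 'nodes' is the array returned by rst.startSet() on a bridged set.
function partition(sideA, sideB) {
    // Cut every link crossing the partition; links within each side stay up.
    sideA.forEach((a) => sideB.forEach((b) => a.disconnect(b)));
}
function heal(sideA, sideB) {
    sideA.forEach((a) => sideB.forEach((b) => a.reconnect(b)));
}
partition([nodes[1]], [nodes[0], nodes[2], nodes[3], nodes[4]]);  // [1] vs [0,2,3,4]
heal([nodes[1]], [nodes[0], nodes[2], nodes[3], nodes[4]]);       // Restore connectivity.
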
diff --git a/jstests/replsets/rollover_preserves_active_txns.js b/jstests/replsets/rollover_preserves_active_txns.js
index 326cd764746..064913fda10 100644
--- a/jstests/replsets/rollover_preserves_active_txns.js
+++ b/jstests/replsets/rollover_preserves_active_txns.js
@@ -10,90 +10,88 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- // A new replica set for both the commit and abort tests to ensure the same clean state.
- function doTest(commitOrAbort) {
- const replSet = new ReplSetTest({
- // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
- nodeOptions:
- {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
- nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
- });
-
- replSet.startSet(PrepareHelpers.replSetStartSetOptions);
- replSet.initiate();
-
- const primary = replSet.getPrimary();
- const secondary = replSet.getSecondary();
- const primaryOplog = primary.getDB("local").oplog.rs;
- assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
- const secondaryOplog = secondary.getDB("local").oplog.rs;
- assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
-
- const coll = primary.getDB("test").test;
- assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
-
- jsTestLog("Prepare a transaction");
-
- const session = primary.startSession();
- session.startTransaction();
- assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const oldestRequiredTimestampForCrashRecovery =
- PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
- assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
-
- jsTestLog("Get transaction entry from config.transactions");
-
- const txnEntry = primary.getDB("config").transactions.findOne();
- assert.lte(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
-
- assert.soonNoExcept(() => {
- const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
- assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
- return true;
- });
-
- jsTestLog("Find prepare oplog entry");
-
- const oplogEntry = PrepareHelpers.findPrepareEntry(primaryOplog);
- assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
- // Must already be written on secondary, since the config.transactions entry is.
- const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
- assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
-
- jsTestLog("Insert documents until oplog exceeds oplogSize");
-
- // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
- PrepareHelpers.growOplogPastMaxSize(replSet);
-
- jsTestLog(
- `Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
-
- assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(primaryOplog));
- assert.soon(() => {
- return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
- });
- assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(secondaryOplog));
-
- if (commitOrAbort === "commit") {
- jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
- PrepareHelpers.commitTransaction(session, prepareTimestamp);
- } else if (commitOrAbort === "abort") {
- jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
- assert.commandWorked(session.abortTransaction_forTesting());
- } else {
- throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
- }
-
- PrepareHelpers.awaitOplogTruncation(replSet);
-
- replSet.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+// A new replica set for both the commit and abort tests to ensure the same clean state.
+function doTest(commitOrAbort) {
+ const replSet = new ReplSetTest({
+ // Oplog can be truncated each "sync" cycle. Increase its frequency to once per second.
+ nodeOptions: {syncdelay: 1, setParameter: {logComponentVerbosity: tojson({storage: 1})}},
+ nodes: [{}, {rsConfig: {priority: 0, votes: 0}}]
+ });
+
+ replSet.startSet(PrepareHelpers.replSetStartSetOptions);
+ replSet.initiate();
+
+ const primary = replSet.getPrimary();
+ const secondary = replSet.getSecondary();
+ const primaryOplog = primary.getDB("local").oplog.rs;
+ assert.lte(primaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+ const secondaryOplog = secondary.getDB("local").oplog.rs;
+ assert.lte(secondaryOplog.dataSize(), PrepareHelpers.oplogSizeBytes);
+
+ const coll = primary.getDB("test").test;
+ assert.commandWorked(coll.insert({}, {writeConcern: {w: "majority"}}));
+
+ jsTestLog("Prepare a transaction");
+
+ const session = primary.startSession();
+ session.startTransaction();
+ assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
+ const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+ const oldestRequiredTimestampForCrashRecovery =
+ PrepareHelpers.getOldestRequiredTimestampForCrashRecovery(primary.getDB("test"));
+ assert.lte(oldestRequiredTimestampForCrashRecovery, prepareTimestamp);
+
+ jsTestLog("Get transaction entry from config.transactions");
+
+ const txnEntry = primary.getDB("config").transactions.findOne();
+ assert.lte(txnEntry.startOpTime.ts, prepareTimestamp, tojson(txnEntry));
+
+ assert.soonNoExcept(() => {
+ const secondaryTxnEntry = secondary.getDB("config").transactions.findOne();
+ assert.eq(secondaryTxnEntry, txnEntry, tojson(secondaryTxnEntry));
+ return true;
+ });
+
+ jsTestLog("Find prepare oplog entry");
+
+ const oplogEntry = PrepareHelpers.findPrepareEntry(primaryOplog);
+ assert.eq(oplogEntry.ts, prepareTimestamp, tojson(oplogEntry));
+ // Must already be written on secondary, since the config.transactions entry is.
+ const secondaryOplogEntry = PrepareHelpers.findPrepareEntry(secondaryOplog);
+ assert.eq(secondaryOplogEntry.ts, prepareTimestamp, tojson(secondaryOplogEntry));
+
+ jsTestLog("Insert documents until oplog exceeds oplogSize");
+
+ // Oplog with prepared txn grows indefinitely - let it reach twice its supposed max size.
+ PrepareHelpers.growOplogPastMaxSize(replSet);
+
+ jsTestLog(`Oplog dataSize = ${primaryOplog.dataSize()}, check the prepare entry still exists`);
+
+ assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(primaryOplog));
+ assert.soon(() => {
+ return secondaryOplog.dataSize() > PrepareHelpers.oplogSizeBytes;
+ });
+ assert.eq(oplogEntry, PrepareHelpers.findPrepareEntry(secondaryOplog));
+
+ if (commitOrAbort === "commit") {
+ jsTestLog("Commit prepared transaction and wait for oplog to shrink to max oplogSize");
+ PrepareHelpers.commitTransaction(session, prepareTimestamp);
+ } else if (commitOrAbort === "abort") {
+ jsTestLog("Abort prepared transaction and wait for oplog to shrink to max oplogSize");
+ assert.commandWorked(session.abortTransaction_forTesting());
+ } else {
+ throw new Error(`Unrecognized value for commitOrAbort: ${commitOrAbort}`);
}
- doTest("commit");
- doTest("abort");
+ PrepareHelpers.awaitOplogTruncation(replSet);
+
+ replSet.stopSet();
+}
+
+doTest("commit");
+doTest("abort");
})();
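
The test above hinges on the rule that the oplog entry for a prepared transaction must survive truncation until the transaction commits or aborts, which is why the oplog is allowed to grow past its configured maximum. A condensed sketch of that lifecycle, assuming a 'primary' connection and jstests/core/txns/libs/prepare_helpers.js loaded:

const session = primary.startSession();
session.startTransaction();
assert.commandWorked(session.getDatabase("test").test.insert({myTransaction: 1}));
// While the transaction stays prepared, the oplog entry at 'prepareTimestamp'
// is pinned and cannot be truncated away.
const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
// Committing (or aborting via session.abortTransaction_forTesting()) releases
// the pin and lets the oplog shrink back toward its maximum size.
PrepareHelpers.commitTransaction(session, prepareTimestamp);
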
diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js
index 2423baea807..209ed8172e1 100644
--- a/jstests/replsets/rslib.js
+++ b/jstests/replsets/rslib.js
@@ -16,449 +16,441 @@ var getLastOpTime;
var setLogVerbosity;
(function() {
- "use strict";
- load("jstests/libs/write_concern_util.js");
-
- var count = 0;
- var w = 0;
-
- /**
- * A wrapper around `replSetSyncFrom` to ensure that the desired sync source is ahead of the
- * syncing node so that the syncing node can choose to sync from the desired sync source.
- * It first stops replication on the syncing node so that it can do a write on the desired
- * sync source and make sure it's ahead. When replication is restarted, the desired sync
- * source will be a valid sync source for the syncing node.
- */
- syncFrom = function(syncingNode, desiredSyncSource, rst) {
- jsTestLog("Forcing " + syncingNode.name + " to sync from " + desiredSyncSource.name);
-
- // Ensure that 'desiredSyncSource' doesn't already have the dummy write sitting around from
- // a previous syncFrom attempt.
- var dummyName = "dummyForSyncFrom";
- rst.getPrimary().getDB(dummyName).getCollection(dummyName).drop();
- assert.soonNoExcept(function() {
- return desiredSyncSource.getDB(dummyName).getCollection(dummyName).findOne() == null;
- });
-
- stopServerReplication(syncingNode);
-
- assert.writeOK(rst.getPrimary().getDB(dummyName).getCollection(dummyName).insert({a: 1}));
- // Wait for 'desiredSyncSource' to get the dummy write we just did so we know it's
- // definitely ahead of 'syncingNode' before we call replSetSyncFrom.
- assert.soonNoExcept(function() {
- return desiredSyncSource.getDB(dummyName).getCollection(dummyName).findOne({a: 1});
- });
-
- assert.commandWorked(syncingNode.adminCommand({replSetSyncFrom: desiredSyncSource.name}));
- restartServerReplication(syncingNode);
- rst.awaitSyncSource(syncingNode, desiredSyncSource);
- };
-
- /**
- * Calls a function 'f' once a second until it returns true. Throws an exception once 'f' has
- * been called more than 'retries' times without returning true. If 'retries' is not given,
- * it defaults to 200. 'retries' must be an integer greater than or equal to zero.
- */
- wait = function(f, msg, retries) {
- w++;
- var n = 0;
- var default_retries = 200;
- var delay_interval_ms = 1000;
-
- // Set default value if 'retries' was not given.
- if (retries === undefined) {
- retries = default_retries;
+"use strict";
+load("jstests/libs/write_concern_util.js");
+
+var count = 0;
+var w = 0;
+
+/**
+ * A wrapper around `replSetSyncFrom` to ensure that the desired sync source is ahead of the
+ * syncing node so that the syncing node can choose to sync from the desired sync source.
+ * It first stops replication on the syncing node so that it can do a write on the desired
+ * sync source and make sure it's ahead. When replication is restarted, the desired sync
+ * source will be a valid sync source for the syncing node.
+ */
+syncFrom = function(syncingNode, desiredSyncSource, rst) {
+ jsTestLog("Forcing " + syncingNode.name + " to sync from " + desiredSyncSource.name);
+
+ // Ensure that 'desiredSyncSource' doesn't already have the dummy write sitting around from
+ // a previous syncFrom attempt.
+ var dummyName = "dummyForSyncFrom";
+ rst.getPrimary().getDB(dummyName).getCollection(dummyName).drop();
+ assert.soonNoExcept(function() {
+ return desiredSyncSource.getDB(dummyName).getCollection(dummyName).findOne() == null;
+ });
+
+ stopServerReplication(syncingNode);
+
+ assert.writeOK(rst.getPrimary().getDB(dummyName).getCollection(dummyName).insert({a: 1}));
+ // Wait for 'desiredSyncSource' to get the dummy write we just did so we know it's
+ // definitely ahead of 'syncingNode' before we call replSetSyncFrom.
+ assert.soonNoExcept(function() {
+ return desiredSyncSource.getDB(dummyName).getCollection(dummyName).findOne({a: 1});
+ });
+
+ assert.commandWorked(syncingNode.adminCommand({replSetSyncFrom: desiredSyncSource.name}));
+ restartServerReplication(syncingNode);
+ rst.awaitSyncSource(syncingNode, desiredSyncSource);
+};
+
+/**
+ * Calls a function 'f' once a second until it returns true. Throws an exception once 'f' has
+ * been called more than 'retries' times without returning true. If 'retries' is not given,
+ * it defaults to 200. 'retries' must be an integer greater than or equal to zero.
+ */
+wait = function(f, msg, retries) {
+ w++;
+ var n = 0;
+ var default_retries = 200;
+ var delay_interval_ms = 1000;
+
+ // Set default value if 'retries' was not given.
+ if (retries === undefined) {
+ retries = default_retries;
+ }
+ while (!f()) {
+ if (n % 4 == 0) {
+ print("Waiting " + w);
}
- while (!f()) {
- if (n % 4 == 0) {
- print("Waiting " + w);
- }
- if (++n == 4) {
- print("" + f);
- }
- if (n >= retries) {
- throw new Error('Tried ' + retries + ' times, giving up on ' + msg);
- }
- sleep(delay_interval_ms);
+ if (++n == 4) {
+ print("" + f);
}
- };
-
- /**
- * Use this to do something once every 4 iterations.
- *
- * <pre>
- * for (i=0; i<1000; i++) {
- * occasionally(function() { print("4 more iterations"); });
- * }
- * </pre>
- */
- occasionally = function(f, n) {
- var interval = n || 4;
- if (count % interval == 0) {
- f();
+ if (n >= retries) {
+ throw new Error('Tried ' + retries + ' times, giving up on ' + msg);
}
- count++;
- };
+ sleep(delay_interval_ms);
+ }
+};
+
+/**
+ * Use this to do something once every 4 iterations.
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
+ }
+ count++;
+};
+
+/**
+ * Attempt to re-establish and re-authenticate a Mongo connection if it was dropped, with
+ * multiple retries.
+ *
+ * Returns upon successful re-connection. If connection cannot be established after 200
+ * retries, throws an exception.
+ *
+ * @param conn - a Mongo connection object or DB object.
+ */
+reconnect = function(conn) {
+ var retries = 200;
+ wait(function() {
+ var db;
+ try {
+ // Make this work with either dbs or connections.
+ if (typeof (conn.getDB) == "function") {
+ db = conn.getDB('foo');
+ } else {
+ db = conn;
+ }
- /**
- * Attempt to re-establish and re-authenticate a Mongo connection if it was dropped, with
- * multiple retries.
- *
-     * Returns upon successful re-connection. If connection cannot be established after 200
- * retries, throws an exception.
- *
- * @param conn - a Mongo connection object or DB object.
- */
- reconnect = function(conn) {
- var retries = 200;
- wait(function() {
- var db;
- try {
- // Make this work with either dbs or connections.
- if (typeof(conn.getDB) == "function") {
- db = conn.getDB('foo');
- } else {
- db = conn;
- }
+ // Run a simple command to re-establish connection.
+ db.bar.stats();
- // Run a simple command to re-establish connection.
- db.bar.stats();
+ // SERVER-4241: Shell connections don't re-authenticate on reconnect.
+ if (jsTest.options().keyFile) {
+ return jsTest.authenticate(db.getMongo());
+ }
+ return true;
+ } catch (e) {
+ print(e);
+ return false;
+ }
+ }, retries);
+};
+
+getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
+
+getLeastRecentOp = function({server, readConcern}) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ const oplog = server.getDB("local").oplog.rs;
+ const cursor = oplog.find().sort({$natural: 1}).limit(1).readConcern(readConcern);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
+
+waitForAllMembers = function(master, timeout) {
+ var failCount = 0;
+
+ assert.soon(function() {
+ var state = null;
+ try {
+ state = master.getSisterDB("admin").runCommand({replSetGetStatus: 1});
+ failCount = 0;
+ } catch (e) {
+ // Connection can get reset on replica set failover causing a socket exception
+ print("Calling replSetGetStatus failed");
+ print(e);
+ return false;
+ }
+ occasionally(function() {
+ printjson(state);
+ }, 10);
- // SERVER-4241: Shell connections don't re-authenticate on reconnect.
- if (jsTest.options().keyFile) {
- return jsTest.authenticate(db.getMongo());
- }
- return true;
- } catch (e) {
- print(e);
+ for (var m in state.members) {
+ if (state.members[m].state != 1 && // PRIMARY
+ state.members[m].state != 2 && // SECONDARY
+ state.members[m].state != 7) { // ARBITER
return false;
}
- }, retries);
- };
+ }
+ printjson(state);
+ return true;
+ }, "not all members ready", timeout || 10 * 60 * 1000);
+
+ print("All members are now in state PRIMARY, SECONDARY, or ARBITER");
+};
- getLatestOp = function(server) {
- server.getDB("admin").getMongo().setSlaveOk();
- var log = server.getDB("local")['oplog.rs'];
- var cursor = log.find({}).sort({'$natural': -1}).limit(1);
- if (cursor.hasNext()) {
- return cursor.next();
+reconfig = function(rs, config, force) {
+ "use strict";
+ var admin = rs.getPrimary().getDB("admin");
+ var e;
+ var master;
+ try {
+ var reconfigCommand = {replSetReconfig: rs._updateConfigIfNotDurable(config), force: force};
+ var res = admin.runCommand(reconfigCommand);
+
+ // Retry reconfig if quorum check failed because not enough voting nodes responded.
+ if (!res.ok && res.code === ErrorCodes.NodeNotFound) {
+ print("Replset reconfig failed because quorum check failed. Retry reconfig once. " +
+ "Error: " + tojson(res));
+ res = admin.runCommand(reconfigCommand);
}
- return null;
- };
- getLeastRecentOp = function({server, readConcern}) {
- server.getDB("admin").getMongo().setSlaveOk();
- const oplog = server.getDB("local").oplog.rs;
- const cursor = oplog.find().sort({$natural: 1}).limit(1).readConcern(readConcern);
- if (cursor.hasNext()) {
- return cursor.next();
+ assert.commandWorked(res);
+ } catch (e) {
+ if (!isNetworkError(e)) {
+ throw e;
}
- return null;
- };
+ print("Calling replSetReconfig failed. " + tojson(e));
+ }
- waitForAllMembers = function(master, timeout) {
- var failCount = 0;
+ var master = rs.getPrimary().getDB("admin");
+ waitForAllMembers(master);
- assert.soon(function() {
- var state = null;
+ return master;
+};
+
+awaitOpTime = function(catchingUpNode, latestOpTimeNode) {
+ var ts, ex, opTime;
+ assert.soon(
+ function() {
try {
- state = master.getSisterDB("admin").runCommand({replSetGetStatus: 1});
- failCount = 0;
- } catch (e) {
- // Connection can get reset on replica set failover causing a socket exception
- print("Calling replSetGetStatus failed");
- print(e);
- return false;
- }
- occasionally(function() {
- printjson(state);
- }, 10);
-
- for (var m in state.members) {
- if (state.members[m].state != 1 && // PRIMARY
- state.members[m].state != 2 && // SECONDARY
- state.members[m].state != 7) { // ARBITER
- return false;
+ // The following statement extracts the timestamp field from the most recent
+                // element of the oplog, and stores it in "ts".
+ ts = getLatestOp(catchingUpNode).ts;
+ opTime = getLatestOp(latestOpTimeNode).ts;
+ if ((ts.t == opTime.t) && (ts.i == opTime.i)) {
+ return true;
}
+ ex = null;
+ return false;
+            } catch (e) {
+                // Assign to the outer 'ex' so the failure message below can report it;
+                // 'catch (ex)' would shadow that variable and always leave it unset.
+                ex = e;
+                return false;
+            }
- printjson(state);
- return true;
- }, "not all members ready", timeout || 10 * 60 * 1000);
-
- print("All members are now in state PRIMARY, SECONDARY, or ARBITER");
- };
-
- reconfig = function(rs, config, force) {
- "use strict";
- var admin = rs.getPrimary().getDB("admin");
- var e;
- var master;
- try {
- var reconfigCommand = {
- replSetReconfig: rs._updateConfigIfNotDurable(config),
- force: force
- };
- var res = admin.runCommand(reconfigCommand);
-
- // Retry reconfig if quorum check failed because not enough voting nodes responded.
- if (!res.ok && res.code === ErrorCodes.NodeNotFound) {
- print("Replset reconfig failed because quorum check failed. Retry reconfig once. " +
- "Error: " + tojson(res));
- res = admin.runCommand(reconfigCommand);
+ },
+ function() {
+ var message = "Node " + catchingUpNode + " only reached optime " + tojson(ts) +
+ " not " + tojson(opTime);
+ if (ex) {
+ message += "; last attempt failed with exception " + tojson(ex);
}
-
- assert.commandWorked(res);
- } catch (e) {
- if (!isNetworkError(e)) {
- throw e;
+ return message;
+ });
+};
+
+/**
+ * Uses the results of running replSetGetStatus against an arbitrary replset node to wait until
+ * all nodes in the set are replicated through the same optime.
+ * 'rs' is an array of connections to replica set nodes. This function is useful when you
+ * don't have a ReplSetTest object to use; otherwise, ReplSetTest.awaitReplication is preferred.
+ */
+waitUntilAllNodesCaughtUp = function(rs, timeout) {
+ var rsStatus;
+ var firstConflictingIndex;
+ var ot;
+ var otherOt;
+ assert.soon(
+ function() {
+ rsStatus = rs[0].adminCommand('replSetGetStatus');
+ if (rsStatus.ok != 1) {
+ return false;
}
- print("Calling replSetReconfig failed. " + tojson(e));
- }
-
- var master = rs.getPrimary().getDB("admin");
- waitForAllMembers(master);
-
- return master;
- };
-
- awaitOpTime = function(catchingUpNode, latestOpTimeNode) {
- var ts, ex, opTime;
- assert.soon(
- function() {
- try {
- // The following statement extracts the timestamp field from the most recent
-                    // element of the oplog, and stores it in "ts".
- ts = getLatestOp(catchingUpNode).ts;
- opTime = getLatestOp(latestOpTimeNode).ts;
- if ((ts.t == opTime.t) && (ts.i == opTime.i)) {
- return true;
- }
- ex = null;
+ assert.eq(rs.length, rsStatus.members.length, tojson(rsStatus));
+ ot = rsStatus.members[0].optime;
+ for (var i = 1; i < rsStatus.members.length; ++i) {
+ var otherNode = rsStatus.members[i];
+
+ // Must be in PRIMARY or SECONDARY state.
+ if (otherNode.state != ReplSetTest.State.PRIMARY &&
+ otherNode.state != ReplSetTest.State.SECONDARY) {
return false;
-                } catch (e) {
-                    ex = e;
-                    return false;
-                }
- },
- function() {
- var message = "Node " + catchingUpNode + " only reached optime " + tojson(ts) +
- " not " + tojson(opTime);
- if (ex) {
- message += "; last attempt failed with exception " + tojson(ex);
}
- return message;
- });
- };
- /**
- * Uses the results of running replSetGetStatus against an arbitrary replset node to wait until
- * all nodes in the set are replicated through the same optime.
- * 'rs' is an array of connections to replica set nodes. This function is useful when you
-     * don't have a ReplSetTest object to use; otherwise, ReplSetTest.awaitReplication is preferred.
- */
- waitUntilAllNodesCaughtUp = function(rs, timeout) {
- var rsStatus;
- var firstConflictingIndex;
- var ot;
- var otherOt;
- assert.soon(
- function() {
- rsStatus = rs[0].adminCommand('replSetGetStatus');
- if (rsStatus.ok != 1) {
+ // Fail if optimes are not equal.
+ otherOt = otherNode.optime;
+ if (!friendlyEqual(otherOt, ot)) {
+ firstConflictingIndex = i;
return false;
}
- assert.eq(rs.length, rsStatus.members.length, tojson(rsStatus));
- ot = rsStatus.members[0].optime;
- for (var i = 1; i < rsStatus.members.length; ++i) {
- var otherNode = rsStatus.members[i];
-
- // Must be in PRIMARY or SECONDARY state.
- if (otherNode.state != ReplSetTest.State.PRIMARY &&
- otherNode.state != ReplSetTest.State.SECONDARY) {
- return false;
- }
-
- // Fail if optimes are not equal.
- otherOt = otherNode.optime;
- if (!friendlyEqual(otherOt, ot)) {
- firstConflictingIndex = i;
- return false;
- }
- }
- return true;
- },
- function() {
- return "Optimes of members 0 (" + tojson(ot) + ") and " + firstConflictingIndex +
- " (" + tojson(otherOt) + ") are different in " + tojson(rsStatus);
- },
- timeout);
- };
-
- /**
- * Waits for the given node to reach the given state, ignoring network errors. Ensures that the
- * connection is re-connected and usable when the function returns.
- */
- waitForState = function(node, state) {
- assert.soonNoExcept(function() {
- assert.commandWorked(node.adminCommand(
- {replSetTest: 1, waitForMemberState: state, timeoutMillis: 60 * 1000 * 5}));
- return true;
- });
- // Some state transitions cause connections to be closed, but whether the connection close
- // happens before or after the replSetTest command above returns is racy, so to ensure that
- // the connection to 'node' is usable after this function returns, reconnect it first.
- reconnect(node);
- };
-
- /**
-     * Starts each node in the given replica set.
-     * Returns true if the storage engine supports readConcern 'majority', and false
-     * otherwise.
- *
- * @param replSetTest - The instance of {@link ReplSetTest} to start
- * @param options - The options passed to {@link ReplSetTest.startSet}
- */
- startSetIfSupportsReadMajority = function(replSetTest, options) {
- replSetTest.startSet(options);
- return replSetTest.nodes[0]
- .adminCommand("serverStatus")
- .storageEngine.supportsCommittedReads;
- };
-
- /**
- * Performs a reInitiate() call on 'replSetTest', ignoring errors that are related to an aborted
- * secondary member. All other errors are rethrown.
- */
- reInitiateWithoutThrowingOnAbortedMember = function(replSetTest) {
- try {
- replSetTest.reInitiate();
- } catch (e) {
- // reInitiate can throw because it tries to run an ismaster command on
- // all secondaries, including the new one that may have already aborted
- const errMsg = tojson(e);
- if (isNetworkError(e)) {
- // Ignore these exceptions, which are indicative of an aborted node
- } else {
- throw e;
}
+ return true;
+ },
+ function() {
+ return "Optimes of members 0 (" + tojson(ot) + ") and " + firstConflictingIndex + " (" +
+ tojson(otherOt) + ") are different in " + tojson(rsStatus);
+ },
+ timeout);
+};
+
+/**
+ * Waits for the given node to reach the given state, ignoring network errors. Ensures that the
+ * connection is re-connected and usable when the function returns.
+ */
+waitForState = function(node, state) {
+ assert.soonNoExcept(function() {
+ assert.commandWorked(node.adminCommand(
+ {replSetTest: 1, waitForMemberState: state, timeoutMillis: 60 * 1000 * 5}));
+ return true;
+ });
+ // Some state transitions cause connections to be closed, but whether the connection close
+ // happens before or after the replSetTest command above returns is racy, so to ensure that
+ // the connection to 'node' is usable after this function returns, reconnect it first.
+ reconnect(node);
+};
+
+/**
+ * Starts each node in the given replica set.
+ * Returns true if the storage engine supports readConcern 'majority', and false
+ * otherwise.
+ *
+ * @param replSetTest - The instance of {@link ReplSetTest} to start
+ * @param options - The options passed to {@link ReplSetTest.startSet}
+ */
+startSetIfSupportsReadMajority = function(replSetTest, options) {
+ replSetTest.startSet(options);
+ return replSetTest.nodes[0].adminCommand("serverStatus").storageEngine.supportsCommittedReads;
+};
+
+/**
+ * Performs a reInitiate() call on 'replSetTest', ignoring errors that are related to an aborted
+ * secondary member. All other errors are rethrown.
+ */
+reInitiateWithoutThrowingOnAbortedMember = function(replSetTest) {
+ try {
+ replSetTest.reInitiate();
+ } catch (e) {
+ // reInitiate can throw because it tries to run an ismaster command on
+ // all secondaries, including the new one that may have already aborted
+ const errMsg = tojson(e);
+ if (isNetworkError(e)) {
+ // Ignore these exceptions, which are indicative of an aborted node
+ } else {
+ throw e;
+ }
+ }
+};
+
+/**
+ * Waits for the specified hosts to enter a certain state.
+ */
+awaitRSClientHosts = function(conn, host, hostOk, rs, timeout) {
+ var hostCount = host.length;
+ if (hostCount) {
+ for (var i = 0; i < hostCount; i++) {
+ awaitRSClientHosts(conn, host[i], hostOk, rs);
}
- };
- /**
- * Waits for the specified hosts to enter a certain state.
- */
- awaitRSClientHosts = function(conn, host, hostOk, rs, timeout) {
- var hostCount = host.length;
- if (hostCount) {
- for (var i = 0; i < hostCount; i++) {
- awaitRSClientHosts(conn, host[i], hostOk, rs);
- }
+ return;
+ }
- return;
- }
+ timeout = timeout || 5 * 60 * 1000;
- timeout = timeout || 5 * 60 * 1000;
+ if (hostOk == undefined)
+ hostOk = {ok: true};
+ if (host.host)
+ host = host.host;
+ if (rs)
+ rs = rs.name;
- if (hostOk == undefined)
- hostOk = {ok: true};
- if (host.host)
- host = host.host;
- if (rs)
- rs = rs.name;
+ print("Awaiting " + host + " to be " + tojson(hostOk) + " for " + conn + " (rs: " + rs + ")");
- print("Awaiting " + host + " to be " + tojson(hostOk) + " for " + conn + " (rs: " + rs +
- ")");
+ var tests = 0;
- var tests = 0;
+ assert.soon(function() {
+ var rsClientHosts = conn.adminCommand('connPoolStats').replicaSets;
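+        // Every 10th attempt, dump the client's view of its replica sets for debugging.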
+ if (tests++ % 10 == 0) {
+ printjson(rsClientHosts);
+ }
- assert.soon(function() {
- var rsClientHosts = conn.adminCommand('connPoolStats').replicaSets;
- if (tests++ % 10 == 0) {
- printjson(rsClientHosts);
- }
+ for (var rsName in rsClientHosts) {
+ if (rs && rs != rsName)
+ continue;
- for (var rsName in rsClientHosts) {
- if (rs && rs != rsName)
+ for (var i = 0; i < rsClientHosts[rsName].hosts.length; i++) {
+ var clientHost = rsClientHosts[rsName].hosts[i];
+ if (clientHost.addr != host)
continue;
- for (var i = 0; i < rsClientHosts[rsName].hosts.length; i++) {
- var clientHost = rsClientHosts[rsName].hosts[i];
- if (clientHost.addr != host)
- continue;
+ // Check that *all* host properties are set correctly
+ var propOk = true;
+ for (var prop in hostOk) {
+ // Use special comparator for tags because isMaster can return the fields in
+ // different order. The fields of the tags should be treated like a set of
+ // strings and 2 tags should be considered the same if the set is equal.
+ if (prop == 'tags') {
+ if (!clientHost.tags) {
+ propOk = false;
+ break;
+ }
- // Check that *all* host properties are set correctly
- var propOk = true;
- for (var prop in hostOk) {
- // Use special comparator for tags because isMaster can return the fields in
- // different order. The fields of the tags should be treated like a set of
- // strings and 2 tags should be considered the same if the set is equal.
- if (prop == 'tags') {
- if (!clientHost.tags) {
+ for (var hostTag in hostOk.tags) {
+ if (clientHost.tags[hostTag] != hostOk.tags[hostTag]) {
propOk = false;
break;
}
-
- for (var hostTag in hostOk.tags) {
- if (clientHost.tags[hostTag] != hostOk.tags[hostTag]) {
- propOk = false;
- break;
- }
- }
-
- for (var clientTag in clientHost.tags) {
- if (clientHost.tags[clientTag] != hostOk.tags[clientTag]) {
- propOk = false;
- break;
- }
- }
-
- continue;
}
- if (isObject(hostOk[prop])) {
- if (!friendlyEqual(hostOk[prop], clientHost[prop])) {
+ for (var clientTag in clientHost.tags) {
+ if (clientHost.tags[clientTag] != hostOk.tags[clientTag]) {
propOk = false;
break;
}
- } else if (clientHost[prop] != hostOk[prop]) {
+ }
+
+ continue;
+ }
+
+ if (isObject(hostOk[prop])) {
+ if (!friendlyEqual(hostOk[prop], clientHost[prop])) {
propOk = false;
break;
}
+ } else if (clientHost[prop] != hostOk[prop]) {
+ propOk = false;
+ break;
}
+ }
- if (propOk) {
- return true;
- }
+ if (propOk) {
+ return true;
}
}
+ }
- return false;
- }, 'timed out waiting for replica set client to recognize hosts', timeout);
- };
-
- /**
-     * Returns the last opTime of the connection, based on replSetGetStatus. Can only
- * be used on replica set nodes.
- */
- getLastOpTime = function(conn) {
- var replSetStatus =
- assert.commandWorked(conn.getDB("admin").runCommand({replSetGetStatus: 1}));
- var connStatus = replSetStatus.members.filter(m => m.self)[0];
- return connStatus.optime;
- };
-
- /**
- * Set log verbosity on all given nodes.
- * e.g. setLogVerbosity(replTest.nodes, { "replication": {"verbosity": 3} });
- */
- setLogVerbosity = function(nodes, logVerbosity) {
- var verbosity = {
- "setParameter": 1,
- "logComponentVerbosity": logVerbosity,
- };
- nodes.forEach(function(node) {
- assert.commandWorked(node.adminCommand(verbosity));
- });
+ return false;
+ }, 'timed out waiting for replica set client to recognize hosts', timeout);
+};
+
+/**
+ * Returns the last opTime of the connection, based on replSetGetStatus. Can only
+ * be used on replica set nodes.
+ */
+getLastOpTime = function(conn) {
+ var replSetStatus = assert.commandWorked(conn.getDB("admin").runCommand({replSetGetStatus: 1}));
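+    // This node's own entry in the members array is marked with 'self: true'.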
+ var connStatus = replSetStatus.members.filter(m => m.self)[0];
+ return connStatus.optime;
+};
+
+/**
+ * Set log verbosity on all given nodes.
+ * e.g. setLogVerbosity(replTest.nodes, { "replication": {"verbosity": 3} });
+ */
+setLogVerbosity = function(nodes, logVerbosity) {
+ var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": logVerbosity,
};
-
+ nodes.forEach(function(node) {
+ assert.commandWorked(node.adminCommand(verbosity));
+ });
+};
}());
diff --git a/jstests/replsets/secondary_as_sync_source.js b/jstests/replsets/secondary_as_sync_source.js
index 6f446842daa..ec18cebff0d 100644
--- a/jstests/replsets/secondary_as_sync_source.js
+++ b/jstests/replsets/secondary_as_sync_source.js
@@ -5,84 +5,84 @@
* @tags: [requires_replication]
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/rslib.js");
+load("jstests/replsets/rslib.js");
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- const firstIndexName = "_first";
+const firstIndexName = "_first";
- function addTestDocuments(db) {
- let size = 100;
- jsTest.log("Creating " + size + " test documents.");
- var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
- for (var i = 0; i < size; ++i) {
- bulk.insert({i: i});
- }
- assert.writeOK(bulk.execute());
+function addTestDocuments(db) {
+ let size = 100;
+ jsTest.log("Creating " + size + " test documents.");
+ var bulk = db.getCollection(collName).initializeUnorderedBulkOp();
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
+ assert.writeOK(bulk.execute());
+}
- let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2, useBridge: true});
- let nodes = replSet.nodeList();
+let replSet = new ReplSetTest({name: "indexBuilds", nodes: 2, useBridge: true});
+let nodes = replSet.nodeList();
- replSet.startSet({startClean: true});
- replSet.initiate({
- _id: "indexBuilds",
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1], votes: 0, priority: 0},
- ]
- });
+replSet.startSet({startClean: true});
+replSet.initiate({
+ _id: "indexBuilds",
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1], votes: 0, priority: 0},
+ ]
+});
- let primary = replSet.getPrimary();
- let primaryDB = primary.getDB(dbName);
+let primary = replSet.getPrimary();
+let primaryDB = primary.getDB(dbName);
- let secondary = replSet.getSecondary();
- let secondaryDB = secondary.getDB(dbName);
+let secondary = replSet.getSecondary();
+let secondaryDB = secondary.getDB(dbName);
- addTestDocuments(primaryDB);
+addTestDocuments(primaryDB);
- jsTest.log("Hanging index builds on the secondary node");
- assert.commandWorked(secondaryDB.adminCommand(
- {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
+jsTest.log("Hanging index builds on the secondary node");
+assert.commandWorked(secondaryDB.adminCommand(
+ {configureFailPoint: "hangAfterStartingIndexBuild", mode: "alwaysOn"}));
- jsTest.log("Beginning index build: " + firstIndexName);
- assert.commandWorked(primaryDB.runCommand({
- createIndexes: collName,
- indexes: [{key: {i: 1}, name: firstIndexName, background: true}],
- writeConcern: {w: 2}
- }));
+jsTest.log("Beginning index build: " + firstIndexName);
+assert.commandWorked(primaryDB.runCommand({
+ createIndexes: collName,
+ indexes: [{key: {i: 1}, name: firstIndexName, background: true}],
+ writeConcern: {w: 2}
+}));
- jsTest.log("Adding a new node to the replica set");
- let newNode = replSet.add({rsConfig: {votes: 0, priority: 0}});
+jsTest.log("Adding a new node to the replica set");
+let newNode = replSet.add({rsConfig: {votes: 0, priority: 0}});
-    // Ensure that the new node and primary cannot communicate with each other.
- newNode.disconnect(primary);
+// Ensure that the new node and primary cannot communicate with each other.
+newNode.disconnect(primary);
- replSet.reInitiate();
+replSet.reInitiate();
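+// With the primary unreachable, the new node must pick the secondary as its sync source.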
- // Wait for the new node to finish initial sync.
- waitForState(newNode, ReplSetTest.State.SECONDARY);
+// Wait for the new node to finish initial sync.
+waitForState(newNode, ReplSetTest.State.SECONDARY);
- // Let the 'secondary' finish its index build.
- jsTest.log("Removing index build hang on the secondary node to allow it to finish");
- assert.commandWorked(
- secondaryDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
+// Let the 'secondary' finish its index build.
+jsTest.log("Removing index build hang on the secondary node to allow it to finish");
+assert.commandWorked(
+ secondaryDB.adminCommand({configureFailPoint: "hangAfterStartingIndexBuild", mode: "off"}));
- // Wait for the index builds to finish.
- replSet.waitForAllIndexBuildsToFinish(dbName, collName);
- jsTest.log("Checking if the indexes match between the new node and the secondary node");
+// Wait for the index builds to finish.
+replSet.waitForAllIndexBuildsToFinish(dbName, collName);
+jsTest.log("Checking if the indexes match between the new node and the secondary node");
- let newNodeDB = newNode.getDB(dbName);
-    jsTest.log("New node's indexes:");
- printjson(newNodeDB.getCollection(collName).getIndexes());
-    jsTest.log("Secondary node's indexes:");
- printjson(secondaryDB.getCollection(collName).getIndexes());
+let newNodeDB = newNode.getDB(dbName);
+jsTest.log("New node's indexes:");
+printjson(newNodeDB.getCollection(collName).getIndexes());
+jsTest.log("Secondary node's indexes:");
+printjson(secondaryDB.getCollection(collName).getIndexes());
- assert.eq(newNodeDB.getCollection(collName).getIndexes().length,
- secondaryDB.getCollection(collName).getIndexes().length);
+assert.eq(newNodeDB.getCollection(collName).getIndexes().length,
+ secondaryDB.getCollection(collName).getIndexes().length);
- replSet.stopSet();
+replSet.stopSet();
})();
diff --git a/jstests/replsets/secondary_reads_timestamp_visibility.js b/jstests/replsets/secondary_reads_timestamp_visibility.js
index b0d213f91f0..4b981b72cc8 100644
--- a/jstests/replsets/secondary_reads_timestamp_visibility.js
+++ b/jstests/replsets/secondary_reads_timestamp_visibility.js
@@ -7,96 +7,94 @@
*
*/
(function() {
- "use strict";
-
- load('jstests/replsets/libs/secondary_reads_test.js');
-
- const name = "secondaryReadsTimestampVisibility";
- const collName = "testColl";
- let secondaryReadsTest = new SecondaryReadsTest(name);
- let replSet = secondaryReadsTest.getReplset();
-
- let primaryDB = secondaryReadsTest.getPrimaryDB();
- let secondaryDB = secondaryReadsTest.getSecondaryDB();
-
- if (!primaryDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
- secondaryReadsTest.stop();
- return;
- }
- let primaryColl = primaryDB.getCollection(collName);
-
- // Create a collection and an index. Insert some data.
- primaryDB.runCommand({drop: collName});
- assert.commandWorked(primaryDB.runCommand({create: collName}));
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: collName, indexes: [{key: {y: 1}, name: "y_1", unique: true}]}));
- for (let i = 0; i < 100; i++) {
- assert.commandWorked(primaryColl.insert({_id: i, x: 0, y: i + 1}));
- }
-
- replSet.awaitLastOpCommitted();
- // This function includes a call to awaitReplication().
- replSet.waitForAllIndexBuildsToFinish(primaryDB.getName(), collName);
-
- // Sanity check.
- assert.eq(secondaryDB.getCollection(collName).find({x: 0}).itcount(), 100);
- assert.eq(secondaryDB.getCollection(collName).find({y: {$gte: 1, $lt: 101}}).itcount(), 100);
-
- // Prevent a batch from completing on the secondary.
- let pauseAwait = secondaryReadsTest.pauseSecondaryBatchApplication();
-
- // Update x to 1 in each document with default writeConcern and make sure we see the correct
- // data on the primary.
- let updates = [];
- for (let i = 0; i < 100; i++) {
- updates[i] = {q: {_id: i}, u: {x: 1, y: i}};
- }
- assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
- assert.eq(primaryColl.find({x: 1}).itcount(), 100);
- assert.eq(primaryColl.find({y: {$gte: 0, $lt: 100}}).itcount(), 100);
-
- // Wait for the batch application to pause.
- pauseAwait();
-
- let levels = ["local", "available", "majority"];
-
- if (!primaryDB.serverStatus().storageEngine.supportsCommittedReads) {
- levels = ["local", "available"];
- }
-
-    // We should see the previous, un-replicated state on the secondary with every readConcern.
- for (let i in levels) {
- print("Checking that no new updates are visible yet for readConcern: " + levels[i]);
- assert.eq(secondaryDB.getCollection(collName).find({x: 0}).readConcern(levels[i]).itcount(),
- 100);
- assert.eq(secondaryDB.getCollection(collName).find({x: 1}).readConcern(levels[i]).itcount(),
- 0);
- assert.eq(secondaryDB.getCollection(collName)
- .find({y: {$gte: 1, $lt: 101}})
- .readConcern(levels[i])
- .itcount(),
- 100);
- }
-
- // Disable the failpoint and let the batch complete.
- secondaryReadsTest.resumeSecondaryBatchApplication();
-
- // Wait for the last op to appear in the majority committed snapshot on each node. This ensures
- // that the op will be visible to a "majority" read.
- replSet.awaitLastOpCommitted();
-
- // Wait for the last op to be replicated to all nodes. This is needed because when majority read
- // concern is disabled, awaitLastOpCommitted() just checks the node's knowledge of the majority
- // commit point and does not ensure the node has applied the operations.
- replSet.awaitReplication();
-
- for (let i in levels) {
- print("Checking that new updates are visible for readConcern: " + levels[i]);
-        // We should see the new state on the secondary with every readConcern.
- assert.eq(secondaryDB.getCollection(collName).find({x: 0}).readConcern(levels[i]).itcount(),
- 0);
- assert.eq(secondaryDB.getCollection(collName).find({x: 1}).readConcern(levels[i]).itcount(),
- 100);
- }
+"use strict";
+
+load('jstests/replsets/libs/secondary_reads_test.js');
+
+const name = "secondaryReadsTimestampVisibility";
+const collName = "testColl";
+let secondaryReadsTest = new SecondaryReadsTest(name);
+let replSet = secondaryReadsTest.getReplset();
+
+let primaryDB = secondaryReadsTest.getPrimaryDB();
+let secondaryDB = secondaryReadsTest.getSecondaryDB();
+
+if (!primaryDB.serverStatus().storageEngine.supportsSnapshotReadConcern) {
secondaryReadsTest.stop();
+ return;
+}
+let primaryColl = primaryDB.getCollection(collName);
+
+// Create a collection and an index. Insert some data.
+primaryDB.runCommand({drop: collName});
+assert.commandWorked(primaryDB.runCommand({create: collName}));
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {y: 1}, name: "y_1", unique: true}]}));
+for (let i = 0; i < 100; i++) {
+ assert.commandWorked(primaryColl.insert({_id: i, x: 0, y: i + 1}));
+}
+
+replSet.awaitLastOpCommitted();
+// This function includes a call to awaitReplication().
+replSet.waitForAllIndexBuildsToFinish(primaryDB.getName(), collName);
+
+// Sanity check.
+assert.eq(secondaryDB.getCollection(collName).find({x: 0}).itcount(), 100);
+assert.eq(secondaryDB.getCollection(collName).find({y: {$gte: 1, $lt: 101}}).itcount(), 100);
+
+// Prevent a batch from completing on the secondary.
+let pauseAwait = secondaryReadsTest.pauseSecondaryBatchApplication();
+
+// Update x to 1 in each document with default writeConcern and make sure we see the correct
+// data on the primary.
+let updates = [];
+for (let i = 0; i < 100; i++) {
+ updates[i] = {q: {_id: i}, u: {x: 1, y: i}};
+}
+assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
+assert.eq(primaryColl.find({x: 1}).itcount(), 100);
+assert.eq(primaryColl.find({y: {$gte: 0, $lt: 100}}).itcount(), 100);
+
+// Wait for the batch application to pause.
+pauseAwait();
+
+let levels = ["local", "available", "majority"];
+
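+// "majority" reads require committed-read support from the storage engine.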
+if (!primaryDB.serverStatus().storageEngine.supportsCommittedReads) {
+ levels = ["local", "available"];
+}
+
+// We should see the previous, un-replicated state on the secondary with every readConcern.
+for (let i in levels) {
+ print("Checking that no new updates are visible yet for readConcern: " + levels[i]);
+ assert.eq(secondaryDB.getCollection(collName).find({x: 0}).readConcern(levels[i]).itcount(),
+ 100);
+ assert.eq(secondaryDB.getCollection(collName).find({x: 1}).readConcern(levels[i]).itcount(), 0);
+ assert.eq(secondaryDB.getCollection(collName)
+ .find({y: {$gte: 1, $lt: 101}})
+ .readConcern(levels[i])
+ .itcount(),
+ 100);
+}
+
+// Disable the failpoint and let the batch complete.
+secondaryReadsTest.resumeSecondaryBatchApplication();
+
+// Wait for the last op to appear in the majority committed snapshot on each node. This ensures
+// that the op will be visible to a "majority" read.
+replSet.awaitLastOpCommitted();
+
+// Wait for the last op to be replicated to all nodes. This is needed because when majority read
+// concern is disabled, awaitLastOpCommitted() just checks the node's knowledge of the majority
+// commit point and does not ensure the node has applied the operations.
+replSet.awaitReplication();
+
+for (let i in levels) {
+ print("Checking that new updates are visible for readConcern: " + levels[i]);
+    // We should see the new state on the secondary with every readConcern.
+ assert.eq(secondaryDB.getCollection(collName).find({x: 0}).readConcern(levels[i]).itcount(), 0);
+ assert.eq(secondaryDB.getCollection(collName).find({x: 1}).readConcern(levels[i]).itcount(),
+ 100);
+}
+secondaryReadsTest.stop();
})();
diff --git a/jstests/replsets/secondary_reads_unique_indexes.js b/jstests/replsets/secondary_reads_unique_indexes.js
index feff1df4545..7941eb8edb3 100644
--- a/jstests/replsets/secondary_reads_unique_indexes.js
+++ b/jstests/replsets/secondary_reads_unique_indexes.js
@@ -27,86 +27,86 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/replsets/libs/secondary_reads_test.js");
+load("jstests/replsets/libs/secondary_reads_test.js");
- const name = "secondaryReadsUniqueIndexes";
- const collName = "testColl";
- let secondaryReadsTest = new SecondaryReadsTest(name);
+const name = "secondaryReadsUniqueIndexes";
+const collName = "testColl";
+let secondaryReadsTest = new SecondaryReadsTest(name);
- let primaryDB = secondaryReadsTest.getPrimaryDB();
- let secondaryDB = secondaryReadsTest.getSecondaryDB();
+let primaryDB = secondaryReadsTest.getPrimaryDB();
+let secondaryDB = secondaryReadsTest.getSecondaryDB();
-    // Set up the collection.
- primaryDB.runCommand({drop: collName});
- assert.commandWorked(primaryDB.runCommand({create: collName}));
+// Set up the collection.
+primaryDB.runCommand({drop: collName});
+assert.commandWorked(primaryDB.runCommand({create: collName}));
- // Create a unique index on the collection in the foreground.
- assert.commandWorked(primaryDB.runCommand(
- {createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1", unique: true}]}));
+// Create a unique index on the collection in the foreground.
+assert.commandWorked(primaryDB.runCommand(
+ {createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1", unique: true}]}));
- let replSet = secondaryReadsTest.getReplset();
- replSet.awaitReplication();
+let replSet = secondaryReadsTest.getReplset();
+replSet.awaitReplication();
- // We want to do updates with at least as many different documents as there are parallel batch
- // writer threads (16). Each iteration increments and decrements a uniquely indexed value, 'x'.
- // The goal is that a reader on a secondary might find a case where the unique index constraint
- // is ignored, and an index on x maps to two different records.
- const nOps = 16;
- const nIterations = 50;
- const nReaders = 16;
+// We want to do updates with at least as many different documents as there are parallel batch
+// writer threads (16). Each iteration increments and decrements a uniquely indexed value, 'x'.
+// The goal is that a reader on a secondary might find a case where the unique index constraint
+// is ignored, and an index on x maps to two different records.
+const nOps = 16;
+const nIterations = 50;
+const nReaders = 16;
- // Do a bunch of reads using the 'x' index on the secondary.
- // No errors should be encountered on the secondary.
- let readFn = function() {
- for (let x = 0; x < TestData.nOps; x++) {
- assert.commandWorked(db.runCommand({
- find: TestData.collName,
- filter: {x: x},
- projection: {x: 1},
- readConcern: {level: "local"},
- }));
- // Sleep a bit to make these reader threads less CPU intensive.
- sleep(60);
- }
- };
- TestData.nOps = nOps;
- TestData.collName = collName;
- secondaryReadsTest.startSecondaryReaders(nReaders, readFn);
+// Do a bunch of reads using the 'x' index on the secondary.
+// No errors should be encountered on the secondary.
+let readFn = function() {
+ for (let x = 0; x < TestData.nOps; x++) {
+ assert.commandWorked(db.runCommand({
+ find: TestData.collName,
+ filter: {x: x},
+ projection: {x: 1},
+ readConcern: {level: "local"},
+ }));
+ // Sleep a bit to make these reader threads less CPU intensive.
+ sleep(60);
+ }
+};
+TestData.nOps = nOps;
+TestData.collName = collName;
+secondaryReadsTest.startSecondaryReaders(nReaders, readFn);
+
+// Write the initial documents. Ensure they have been replicated.
+for (let i = 0; i < nOps; i++) {
+ assert.commandWorked(
+ primaryDB.runCommand({insert: collName, documents: [{_id: i, x: i, iter: 0}]}));
+}
+replSet.awaitReplication();
- // Write the initial documents. Ensure they have been replicated.
+// Cycle the value of x in the document {_id: i, x: i} between i and i+1 each iteration.
+for (let iteration = 0; iteration < nIterations; iteration++) {
+ let updates = [];
+ // Reset each document.
for (let i = 0; i < nOps; i++) {
- assert.commandWorked(
- primaryDB.runCommand({insert: collName, documents: [{_id: i, x: i, iter: 0}]}));
+ updates[i] = {q: {_id: i}, u: {x: i, iter: iteration}};
}
- replSet.awaitReplication();
- // Cycle the value of x in the document {_id: i, x: i} between i and i+1 each iteration.
- for (let iteration = 0; iteration < nIterations; iteration++) {
- let updates = [];
- // Reset each document.
- for (let i = 0; i < nOps; i++) {
- updates[i] = {q: {_id: i}, u: {x: i, iter: iteration}};
- }
+ assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
+ updates = [];
- assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
- updates = [];
-
-        // Generate updates that increment x on each document backwards by _id to avoid conflicts
-        // when applied in order. When these updates get applied to the secondary, they may get
- // applied out of order by different threads and temporarily violate unique index
- // constraints.
- for (let i = 0; i < nOps; i++) {
- // Start at the end and increment x by 1.
- let end = nOps - i - 1;
- let nextX = end + 1;
- updates[i] = {q: {_id: end}, u: {x: nextX, iter: iteration}};
- }
- print("iteration " + iteration);
- assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
+    // Generate updates that increment x on each document backwards by _id to avoid conflicts
+    // when applied in order. When these updates get applied to the secondary, they may get
+ // applied out of order by different threads and temporarily violate unique index
+ // constraints.
+ for (let i = 0; i < nOps; i++) {
+ // Start at the end and increment x by 1.
+ let end = nOps - i - 1;
+ let nextX = end + 1;
+ updates[i] = {q: {_id: end}, u: {x: nextX, iter: iteration}};
}
+ print("iteration " + iteration);
+ assert.commandWorked(primaryDB.runCommand({update: collName, updates: updates}));
+}
- replSet.awaitReplication();
- secondaryReadsTest.stop();
+replSet.awaitReplication();
+secondaryReadsTest.stop();
})();
diff --git a/jstests/replsets/server8070.js b/jstests/replsets/server8070.js
index 500def42a51..7f821e0c61d 100644
--- a/jstests/replsets/server8070.js
+++ b/jstests/replsets/server8070.js
@@ -4,146 +4,140 @@
// to sync from member2.
(function() {
- "use strict";
-
- load('jstests/libs/write_concern_util.js');
- load("jstests/replsets/rslib.js");
-
- // helper to ensure two nodes are at the same place in the oplog
- var waitForSameOplogPosition = function(db1, db2, errmsg) {
- assert.soon(function() {
- var last1 =
- db1.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
- var last2 =
- db2.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
- jsTest.log("primary: " + tojson(last1) + " secondary: " + tojson(last2));
-
- return ((last1.ts.t === last2.ts.t) && (last1.ts.i === last2.ts.i));
- }, errmsg);
- };
-
- // start set
- var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
- replSet.startSet();
- replSet.initiate({
- _id: 'testSet',
- members: [
- {_id: 0, host: getHostName() + ":" + replSet.ports[0]},
- {_id: 1, host: getHostName() + ":" + replSet.ports[1], priority: 0},
- {_id: 2, host: getHostName() + ":" + replSet.ports[2], priority: 0}
- ],
- settings: {chainingAllowed: false}
- });
-
- // set up common points of access
- var master = replSet.getPrimary();
- var primary = master.getDB("foo");
- replSet.nodes[1].setSlaveOk();
- replSet.nodes[2].setSlaveOk();
- var member2 = replSet.nodes[1].getDB("admin");
- var member3 = replSet.nodes[2].getDB("admin");
-
- // Do an initial write
- master.getDB("foo").bar.insert({x: 1});
- replSet.awaitReplication();
-
- jsTest.log("Make sure 2 & 3 are syncing from the primary");
- assert.eq(master, replSet.nodes[0]);
- syncFrom(replSet.nodes[1], master, replSet);
- syncFrom(replSet.nodes[2], master, replSet);
-
- jsTest.log("Stop 2's replication");
- member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
-
- jsTest.log("Do a few writes");
- for (var i = 0; i < 25; i++) {
- primary.bar.insert({x: i});
- }
-
- jsTest.log("Make sure 3 is at write #25");
- waitForSameOplogPosition(primary, member3, "node 3 failed to catch up to the primary");
- // This means 3's buffer is empty
-
- jsTest.log("Stop 3's replication");
- member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
- // logLevel 3 will allow us to see each op the secondary pulls from the primary so that we can
- // determine whether or not all ops are actually being pulled
- member3.runCommand({setParameter: 1, logLevel: 3});
-
- jsTest.log("Start 2's replication");
- member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
-
- jsTest.log("Do some writes");
- for (var i = 25; i < 50; i++) {
- primary.bar.insert({x: i});
- }
-
- jsTest.log("Make sure 2 is at write #50");
- waitForSameOplogPosition(primary, member2, "node 2 failed to catch up to the primary");
- // This means 2's buffer is empty
-
- jsTest.log("Stop 2's replication");
- member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
-
- jsTest.log(
- "Do some writes - 2 & 3 should have up to write #75 in their buffers, but unapplied");
- for (var i = 50; i < 75; i++) {
- primary.bar.insert({x: i});
- }
- var primaryCollectionSize = primary.bar.find().itcount();
- jsTest.log("primary collection size: " + primaryCollectionSize);
- var last = primary.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
-
- jsTest.log("waiting a bit for the secondaries to get the write");
- sleep(10000);
-
- jsTest.log("Shut down the primary");
- replSet.stop(0);
-
- // make sure 3 doesn't try to sync from 2
-    // the 30-second sleep is a holdover from the unsafe assert.throws(assert.soon()),
-    // which would check for 30 seconds that node 3 didn't try to sync from 2
- sleep(30 * 1000);
- jsTest.log("3 should not attempt to sync from 2, as it cannot clear its buffer");
- var syncingTo = member3.adminCommand({replSetGetStatus: 1}).syncingTo;
- assert(syncingTo !== getHostName() + ":" + replSet.ports[1],
- "node 3 is syncing from node 2 :(");
-
- jsTest.log("Pause 3's bgsync thread");
- stopServerReplication(member3.getMongo());
-
- // count documents in member 3
- assert.eq(26,
- member3.getSisterDB("foo").bar.find().itcount(),
- "collection size incorrect on node 3 before applying ops 25-75");
-
- jsTest.log("Allow 3 to apply ops 25-75");
- assert.commandWorked(member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
- "member 3 rsSyncApplyStop admin command failed");
+"use strict";
- assert.soon(function() {
- var last3 =
- member3.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
- jsTest.log("primary: " + tojson(last, '', true) + " secondary: " + tojson(last3, '', true));
- jsTest.log("member 3 collection size: " + member3.getSisterDB("foo").bar.find().itcount());
- jsTest.log("curop: ");
- printjson(member3.getSisterDB("foo").currentOp(true));
- return ((last.ts.t === last3.ts.t) && (last.ts.i === last3.ts.i));
- }, "Replication member 3 did not apply ops 25-75");
-
- jsTest.log("Start 3's bgsync thread");
- restartServerReplication(member3.getMongo());
-
- jsTest.log("Node 3 shouldn't hit rollback");
- var end = (new Date()).getTime() + 10000;
- while ((new Date()).getTime() < end) {
- assert('ROLLBACK' !== member3.runCommand({replSetGetStatus: 1}).members[2].stateStr);
- sleep(30);
- }
-
- // Need to re-enable writes before clean shutdown.
- assert.commandWorked(member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
-
- replSet.stopSet();
+load('jstests/libs/write_concern_util.js');
+load("jstests/replsets/rslib.js");
+// helper to ensure two nodes are at the same place in the oplog
+var waitForSameOplogPosition = function(db1, db2, errmsg) {
+ assert.soon(function() {
+ var last1 = db1.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ var last2 = db2.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ jsTest.log("primary: " + tojson(last1) + " secondary: " + tojson(last2));
+
+ return ((last1.ts.t === last2.ts.t) && (last1.ts.i === last2.ts.i));
+ }, errmsg);
+};
+
+// start set
+var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
+replSet.startSet();
+replSet.initiate({
+ _id: 'testSet',
+ members: [
+ {_id: 0, host: getHostName() + ":" + replSet.ports[0]},
+ {_id: 1, host: getHostName() + ":" + replSet.ports[1], priority: 0},
+ {_id: 2, host: getHostName() + ":" + replSet.ports[2], priority: 0}
+ ],
+ settings: {chainingAllowed: false}
+});
+
+// set up common points of access
+var master = replSet.getPrimary();
+var primary = master.getDB("foo");
+replSet.nodes[1].setSlaveOk();
+replSet.nodes[2].setSlaveOk();
+var member2 = replSet.nodes[1].getDB("admin");
+var member3 = replSet.nodes[2].getDB("admin");
+
+// Do an initial write
+master.getDB("foo").bar.insert({x: 1});
+replSet.awaitReplication();
+
+jsTest.log("Make sure 2 & 3 are syncing from the primary");
+assert.eq(master, replSet.nodes[0]);
+syncFrom(replSet.nodes[1], master, replSet);
+syncFrom(replSet.nodes[2], master, replSet);
+
+jsTest.log("Stop 2's replication");
+member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+
+jsTest.log("Do a few writes");
+for (var i = 0; i < 25; i++) {
+ primary.bar.insert({x: i});
+}
+
+jsTest.log("Make sure 3 is at write #25");
+waitForSameOplogPosition(primary, member3, "node 3 failed to catch up to the primary");
+// This means 3's buffer is empty
+
+jsTest.log("Stop 3's replication");
+member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+// logLevel 3 will allow us to see each op the secondary pulls from the primary so that we can
+// determine whether or not all ops are actually being pulled
+member3.runCommand({setParameter: 1, logLevel: 3});
+
+jsTest.log("Start 2's replication");
+member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+
+jsTest.log("Do some writes");
+for (var i = 25; i < 50; i++) {
+ primary.bar.insert({x: i});
+}
+
+jsTest.log("Make sure 2 is at write #50");
+waitForSameOplogPosition(primary, member2, "node 2 failed to catch up to the primary");
+// This means 2's buffer is empty
+
+jsTest.log("Stop 2's replication");
+member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+
+jsTest.log("Do some writes - 2 & 3 should have up to write #75 in their buffers, but unapplied");
+for (var i = 50; i < 75; i++) {
+ primary.bar.insert({x: i});
+}
+var primaryCollectionSize = primary.bar.find().itcount();
+jsTest.log("primary collection size: " + primaryCollectionSize);
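+// Remember the primary's last oplog entry; node 3 must eventually apply up to this point.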
+var last = primary.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+
+jsTest.log("waiting a bit for the secondaries to get the write");
+sleep(10000);
+
+jsTest.log("Shut down the primary");
+replSet.stop(0);
+
+// make sure 3 doesn't try to sync from 2
+// the 30-second sleep is a holdover from the unsafe assert.throws(assert.soon()),
+// which would check for 30 seconds that node 3 didn't try to sync from 2
+sleep(30 * 1000);
+jsTest.log("3 should not attempt to sync from 2, as it cannot clear its buffer");
+var syncingTo = member3.adminCommand({replSetGetStatus: 1}).syncingTo;
+assert(syncingTo !== getHostName() + ":" + replSet.ports[1], "node 3 is syncing from node 2 :(");
+
+jsTest.log("Pause 3's bgsync thread");
+stopServerReplication(member3.getMongo());
+
+// count documents in member 3
+assert.eq(26,
+ member3.getSisterDB("foo").bar.find().itcount(),
+ "collection size incorrect on node 3 before applying ops 25-75");
+
+jsTest.log("Allow 3 to apply ops 25-75");
+assert.commandWorked(member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
+ "member 3 rsSyncApplyStop admin command failed");
+
+assert.soon(function() {
+ var last3 = member3.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ jsTest.log("primary: " + tojson(last, '', true) + " secondary: " + tojson(last3, '', true));
+ jsTest.log("member 3 collection size: " + member3.getSisterDB("foo").bar.find().itcount());
+ jsTest.log("curop: ");
+ printjson(member3.getSisterDB("foo").currentOp(true));
+ return ((last.ts.t === last3.ts.t) && (last.ts.i === last3.ts.i));
+}, "Replication member 3 did not apply ops 25-75");
+
+jsTest.log("Start 3's bgsync thread");
+restartServerReplication(member3.getMongo());
+
+jsTest.log("Node 3 shouldn't hit rollback");
+var end = (new Date()).getTime() + 10000;
+while ((new Date()).getTime() < end) {
+ assert('ROLLBACK' !== member3.runCommand({replSetGetStatus: 1}).members[2].stateStr);
+ sleep(30);
+}
+
+// Need to re-enable writes before clean shutdown.
+assert.commandWorked(member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+
+replSet.stopSet();
}());
\ No newline at end of file
diff --git a/jstests/replsets/server_election_metrics.js b/jstests/replsets/server_election_metrics.js
index e8b159144fc..a67e0027e97 100644
--- a/jstests/replsets/server_election_metrics.js
+++ b/jstests/replsets/server_election_metrics.js
@@ -2,51 +2,51 @@
* Tests the format of 'electionMetrics' serverStatus section.
*/
(function() {
- "use strict";
+"use strict";
- // Verifies that the 'electionMetrics' serverStatus section has the given field.
- function verifyElectionMetricsField(serverStatusResponse, fieldName) {
- assert(serverStatusResponse.electionMetrics.hasOwnProperty(fieldName),
- () => (`The 'electionMetrics' serverStatus section did not have the '${fieldName}' \
+// Verifies that the 'electionMetrics' serverStatus section has the given field.
+function verifyElectionMetricsField(serverStatusResponse, fieldName) {
+ assert(serverStatusResponse.electionMetrics.hasOwnProperty(fieldName),
+ () => (`The 'electionMetrics' serverStatus section did not have the '${fieldName}' \
field: \n${tojson(serverStatusResponse.electionMetrics)}`));
- return serverStatusResponse.electionMetrics[fieldName];
- }
+ return serverStatusResponse.electionMetrics[fieldName];
+}
- // Verifies that the 'electionMetrics' serverStatus section has a field for the given election
- // reason counter and that it has the subfields 'called' and 'successful'.
- function verifyElectionReasonCounterFields(serverStatusResponse, fieldName) {
- const field = verifyElectionMetricsField(serverStatusResponse, fieldName);
- assert(field.hasOwnProperty("called"),
- () => (`The '${fieldName}' field in the 'electionMetrics' serverStatus section did \
+// Verifies that the 'electionMetrics' serverStatus section has a field for the given election
+// reason counter and that it has the subfields 'called' and 'successful'.
+function verifyElectionReasonCounterFields(serverStatusResponse, fieldName) {
+ const field = verifyElectionMetricsField(serverStatusResponse, fieldName);
+ assert(field.hasOwnProperty("called"),
+ () => (`The '${fieldName}' field in the 'electionMetrics' serverStatus section did \
not have the 'called' field: \n${tojson(field)}`));
- assert(field.hasOwnProperty("successful"),
- () => (`The '${fieldName}' field in the 'electionMetrics' serverStatus section did \
+ assert(field.hasOwnProperty("successful"),
+ () => (`The '${fieldName}' field in the 'electionMetrics' serverStatus section did \
not have the 'successful' field: \n${tojson(field)}`));
- }
+}
- // Verifies the format of the 'electionMetrics' serverStatus section.
- function verifyElectionMetricsSSS(serverStatusResponse) {
- assert(serverStatusResponse.hasOwnProperty("electionMetrics"),
- () => (`Expected the serverStatus response to have an 'electionMetrics' field:
+// Verifies the format of the 'electionMetrics' serverStatus section.
+function verifyElectionMetricsSSS(serverStatusResponse) {
+ assert(serverStatusResponse.hasOwnProperty("electionMetrics"),
+ () => (`Expected the serverStatus response to have an 'electionMetrics' field:
${tojson(serverStatusResponse)}`));
- verifyElectionReasonCounterFields(serverStatusResponse, "stepUpCmd");
- verifyElectionReasonCounterFields(serverStatusResponse, "priorityTakeover");
- verifyElectionReasonCounterFields(serverStatusResponse, "catchUpTakeover");
- verifyElectionReasonCounterFields(serverStatusResponse, "electionTimeout");
- verifyElectionReasonCounterFields(serverStatusResponse, "freezeTimeout");
- verifyElectionMetricsField(serverStatusResponse, "numStepDownsCausedByHigherTerm");
- verifyElectionMetricsField(serverStatusResponse, "numCatchUps");
- }
+ verifyElectionReasonCounterFields(serverStatusResponse, "stepUpCmd");
+ verifyElectionReasonCounterFields(serverStatusResponse, "priorityTakeover");
+ verifyElectionReasonCounterFields(serverStatusResponse, "catchUpTakeover");
+ verifyElectionReasonCounterFields(serverStatusResponse, "electionTimeout");
+ verifyElectionReasonCounterFields(serverStatusResponse, "freezeTimeout");
+ verifyElectionMetricsField(serverStatusResponse, "numStepDownsCausedByHigherTerm");
+ verifyElectionMetricsField(serverStatusResponse, "numCatchUps");
+}
- // Set up the replica set.
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const primary = rst.getPrimary();
+// Set up the replica set.
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const primary = rst.getPrimary();
- const serverStatusResponse = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
- verifyElectionMetricsSSS(serverStatusResponse);
+const serverStatusResponse = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+verifyElectionMetricsSSS(serverStatusResponse);
- // Stop the replica set.
- rst.stopSet();
+// Stop the replica set.
+rst.stopSet();
}());
diff --git a/jstests/replsets/sessions_collection_auto_healing.js b/jstests/replsets/sessions_collection_auto_healing.js
index b75ed876d25..28f3dc51bba 100644
--- a/jstests/replsets/sessions_collection_auto_healing.js
+++ b/jstests/replsets/sessions_collection_auto_healing.js
@@ -1,122 +1,120 @@
load('jstests/libs/sessions_collection.js');
(function() {
- "use strict";
+"use strict";
- // This test makes assertions about the number of sessions, which are not compatible with
- // implicit sessions.
- TestData.disableImplicitSessions = true;
+// This test makes assertions about the number of sessions, which are not compatible with
+// implicit sessions.
+TestData.disableImplicitSessions = true;
- var replTest = new ReplSetTest({
- name: 'refresh',
- nodes: [{rsConfig: {votes: 1, priority: 1}}, {rsConfig: {votes: 0, priority: 0}}]
- });
- var nodes = replTest.startSet();
+var replTest = new ReplSetTest({
+ name: 'refresh',
+ nodes: [{rsConfig: {votes: 1, priority: 1}}, {rsConfig: {votes: 0, priority: 0}}]
+});
+var nodes = replTest.startSet();
- replTest.initiate();
- var primary = replTest.getPrimary();
- var primaryAdmin = primary.getDB("admin");
+replTest.initiate();
+var primary = replTest.getPrimary();
+var primaryAdmin = primary.getDB("admin");
- replTest.awaitSecondaryNodes();
- var secondary = replTest.getSecondary();
- var secondaryAdmin = secondary.getDB("admin");
+replTest.awaitSecondaryNodes();
+var secondary = replTest.getSecondary();
+var secondaryAdmin = secondary.getDB("admin");
- // Get the current value of the TTL index so that we can verify it's being properly applied.
- let res = assert.commandWorked(
- primary.adminCommand({getParameter: 1, localLogicalSessionTimeoutMinutes: 1}));
- let timeoutMinutes = res.localLogicalSessionTimeoutMinutes;
+// Get the current value of the TTL index so that we can verify it's being properly applied.
+let res = assert.commandWorked(
+ primary.adminCommand({getParameter: 1, localLogicalSessionTimeoutMinutes: 1}));
+let timeoutMinutes = res.localLogicalSessionTimeoutMinutes;
- // Test that we can use sessions on the primary before the sessions collection exists.
- {
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+// Test that we can use sessions on the primary before the sessions collection exists.
+{
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- assert.commandWorked(primaryAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(primaryAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(primary, false, false, timeoutMinutes);
- }
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
+}
- // Test that we can use sessions on secondaries before the sessions collection exists.
- {
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+// Test that we can use sessions on secondaries before the sessions collection exists.
+{
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, false, false, timeoutMinutes);
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, false, false, timeoutMinutes);
- assert.commandWorked(secondaryAdmin.runCommand({startSession: 1}));
+ assert.commandWorked(secondaryAdmin.runCommand({startSession: 1}));
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, false, false, timeoutMinutes);
- }
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, false, false, timeoutMinutes);
+}
- // Test that a refresh on a secondary does not create the sessions collection.
- {
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+// Test that a refresh on a secondary does not create the sessions collection.
+{
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, false, false, timeoutMinutes);
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, false, false, timeoutMinutes);
- assert.commandWorked(secondaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(secondaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, false, false, timeoutMinutes);
- }
- // Test that a refresh on the primary creates the sessions collection.
- {
- validateSessionsCollection(primary, false, false, timeoutMinutes);
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, false, false, timeoutMinutes);
+}
+// Test that a refresh on the primary creates the sessions collection.
+{
+ validateSessionsCollection(primary, false, false, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, false, false, timeoutMinutes);
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, false, false, timeoutMinutes);
- assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(primary, true, true, timeoutMinutes);
- }
+ validateSessionsCollection(primary, true, true, timeoutMinutes);
+}
- // Test that a refresh on a secondary will not create the TTL index on the sessions collection.
- {
- assert.commandWorked(primary.getDB("config").system.sessions.dropIndex({lastUse: 1}));
+// Test that a refresh on a secondary will not create the TTL index on the sessions collection.
+{
+ assert.commandWorked(primary.getDB("config").system.sessions.dropIndex({lastUse: 1}));
- validateSessionsCollection(primary, true, false, timeoutMinutes);
+ validateSessionsCollection(primary, true, false, timeoutMinutes);
- assert.commandWorked(secondaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(secondaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(primary, true, false, timeoutMinutes);
- }
+ validateSessionsCollection(primary, true, false, timeoutMinutes);
+}
- // Test that a refresh on the primary will create the TTL index on the sessions collection.
- {
- validateSessionsCollection(primary, true, false, timeoutMinutes);
+// Test that a refresh on the primary will create the TTL index on the sessions collection.
+{
+ validateSessionsCollection(primary, true, false, timeoutMinutes);
- assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+ assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(primary, true, true, timeoutMinutes);
- }
+ validateSessionsCollection(primary, true, true, timeoutMinutes);
+}
- timeoutMinutes = 4;
+timeoutMinutes = 4;
- replTest.restart(
- 0,
- {startClean: false, setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes});
+replTest.restart(
+ 0, {startClean: false, setParameter: "localLogicalSessionTimeoutMinutes=" + timeoutMinutes});
- primary = replTest.getPrimary();
- primaryAdmin = primary.getDB("admin");
- secondary = replTest.getSecondary();
+primary = replTest.getPrimary();
+primaryAdmin = primary.getDB("admin");
+secondary = replTest.getSecondary();
- // Test that a change to the TTL index expiration on restart will generate a collMod to change
- // the expiration time.
- {
- assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
+// Test that a change to the TTL index expiration on restart will generate a collMod to change
+// the expiration time.
+{
+ assert.commandWorked(primaryAdmin.runCommand({refreshLogicalSessionCacheNow: 1}));
- validateSessionsCollection(primary, true, true, timeoutMinutes);
+ validateSessionsCollection(primary, true, true, timeoutMinutes);
- replTest.awaitReplication();
- validateSessionsCollection(secondary, true, true, timeoutMinutes);
- }
-
- replTest.stopSet();
+ replTest.awaitReplication();
+ validateSessionsCollection(secondary, true, true, timeoutMinutes);
+}
+replTest.stopSet();
})();
diff --git a/jstests/replsets/shutdown.js b/jstests/replsets/shutdown.js
index b35172a808f..7fc2e19c749 100644
--- a/jstests/replsets/shutdown.js
+++ b/jstests/replsets/shutdown.js
@@ -2,30 +2,30 @@
//
load('jstests/replsets/rslib.js');
(function() {
- "use strict";
+"use strict";
- let ns = "test.coll";
+let ns = "test.coll";
- let rst = new ReplSetTest({
- nodes: 2,
- });
+let rst = new ReplSetTest({
+ nodes: 2,
+});
- let conf = rst.getReplSetConfig();
- conf.members[1].votes = 0;
- conf.members[1].priority = 0;
- conf.members[1].hidden = true;
+let conf = rst.getReplSetConfig();
+conf.members[1].votes = 0;
+conf.members[1].priority = 0;
+conf.members[1].hidden = true;
- rst.startSet();
- rst.initiate(conf);
- rst.awaitReplication();
+rst.startSet();
+rst.initiate(conf);
+rst.awaitReplication();
- let secondary = rst.getSecondary();
- rst.stop(secondary);
- let program = rst.start(
- secondary,
- {waitForConnect: false, setParameter: "failpoint.shutdownAtStartup={mode:'alwaysOn'}"});
- // mongod should exit automatically, since failpoint was set.
- let exitCode = waitProgram(program.pid);
- assert.eq(0, exitCode);
- rst.stopSet();
+let secondary = rst.getSecondary();
+rst.stop(secondary);
+let program = rst.start(
+ secondary,
+ {waitForConnect: false, setParameter: "failpoint.shutdownAtStartup={mode:'alwaysOn'}"});
+// mongod should exit automatically, since failpoint was set.
+let exitCode = waitProgram(program.pid);
+assert.eq(0, exitCode);
+rst.stopSet();
})();
diff --git a/jstests/replsets/shutdown_primary.js b/jstests/replsets/shutdown_primary.js
index 65eb4ec3e59..bcaefe8c541 100644
--- a/jstests/replsets/shutdown_primary.js
+++ b/jstests/replsets/shutdown_primary.js
@@ -12,56 +12,56 @@
*
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries,
- // restartReplicationOnSecondaries
- var name = "shutdown_primary";
+load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries,
+ // restartReplicationOnSecondaries
+var name = "shutdown_primary";
- var replTest = new ReplSetTest({name: name, nodes: 3});
- replTest.startSet();
- replTest.initiate();
+var replTest = new ReplSetTest({name: name, nodes: 3});
+replTest.startSet();
+replTest.initiate();
- var primary = replTest.getPrimary();
- var testDB = primary.getDB(name);
- var timeout = ReplSetTest.kDefaultTimeoutMS;
- assert.writeOK(testDB.foo.insert({x: 1}, {writeConcern: {w: 3, wtimeout: timeout}}));
+var primary = replTest.getPrimary();
+var testDB = primary.getDB(name);
+var timeout = ReplSetTest.kDefaultTimeoutMS;
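+// The w:3 write concern ensures all three nodes have this initial write before continuing.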
+assert.writeOK(testDB.foo.insert({x: 1}, {writeConcern: {w: 3, wtimeout: timeout}}));
- jsTestLog("Blocking replication to secondaries.");
- stopReplicationOnSecondaries(replTest);
+jsTestLog("Blocking replication to secondaries.");
+stopReplicationOnSecondaries(replTest);
- jsTestLog("Executing write to primary.");
- assert.writeOK(testDB.foo.insert({x: 2}));
+jsTestLog("Executing write to primary.");
+assert.writeOK(testDB.foo.insert({x: 2}));
- jsTestLog("Attempting to shut down primary.");
- assert.commandFailedWithCode(primary.adminCommand({shutdown: 1}),
- ErrorCodes.ExceededTimeLimit,
- "shut down did not fail with 'ExceededTimeLimit'");
+jsTestLog("Attempting to shut down primary.");
+assert.commandFailedWithCode(primary.adminCommand({shutdown: 1}),
+ ErrorCodes.ExceededTimeLimit,
+ "shut down did not fail with 'ExceededTimeLimit'");
- jsTestLog("Verifying primary did not shut down.");
- assert.writeOK(testDB.foo.insert({x: 3}));
+jsTestLog("Verifying primary did not shut down.");
+assert.writeOK(testDB.foo.insert({x: 3}));
- jsTestLog("Shutting down primary in a parallel shell");
- var awaitShell = startParallelShell(function() {
- db.adminCommand({shutdown: 1, timeoutSecs: 60});
- }, primary.port);
+jsTestLog("Shutting down primary in a parallel shell");
+var awaitShell = startParallelShell(function() {
+ db.adminCommand({shutdown: 1, timeoutSecs: 60});
+}, primary.port);
- jsTestLog("Resuming replication.");
- restartReplicationOnSecondaries(replTest);
+jsTestLog("Resuming replication.");
+restartReplicationOnSecondaries(replTest);
- jsTestLog("Verifying primary shut down and cannot be connected to.");
- // Successfully starting shutdown throws a network error.
- var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode, "expected shutdown to close the shell's connection");
- assert.soonNoExcept(function() {
- // The parallel shell exits while shutdown is in progress, and if this happens early enough,
-        // the primary can still accept connections despite successfully starting to shut down.
- // So, retry connecting until connections cannot be established and an error is thrown.
- assert.throws(function() {
- new Mongo(primary.host);
- });
- return true;
- }, "expected primary node to shut down and not be connectable");
+jsTestLog("Verifying primary shut down and cannot be connected to.");
+// Successfully starting shutdown throws a network error.
+var exitCode = awaitShell({checkExitSuccess: false});
+assert.neq(0, exitCode, "expected shutdown to close the shell's connection");
+assert.soonNoExcept(function() {
+ // The parallel shell exits while shutdown is in progress, and if this happens early enough,
+    // the primary can still accept connections despite successfully starting to shut down.
+ // So, retry connecting until connections cannot be established and an error is thrown.
+ assert.throws(function() {
+ new Mongo(primary.host);
+ });
+ return true;
+}, "expected primary node to shut down and not be connectable");
- replTest.stopSet();
+replTest.stopSet();
})();
diff --git a/jstests/replsets/shutdown_with_prepared_transaction.js b/jstests/replsets/shutdown_with_prepared_transaction.js
index f8844b21074..d241df1a68b 100644
--- a/jstests/replsets/shutdown_with_prepared_transaction.js
+++ b/jstests/replsets/shutdown_with_prepared_transaction.js
@@ -4,35 +4,35 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const conn = replTest.getPrimary();
+const conn = replTest.getPrimary();
- const dbName = "test";
- const collName = "shutdown_with_prepared_txn";
- const testDB = conn.getDB(dbName);
- const testColl = testDB.getCollection(collName);
+const dbName = "test";
+const collName = "shutdown_with_prepared_txn";
+const testDB = conn.getDB(dbName);
+const testColl = testDB.getCollection(collName);
- assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
+assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}}));
- const session = conn.startSession({causalConsistency: false});
- const sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
+const session = conn.startSession({causalConsistency: false});
+const sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
- jsTestLog("Starting a simple transaction and putting it into prepare");
+jsTestLog("Starting a simple transaction and putting it into prepare");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: 1}));
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: 1}));
- PrepareHelpers.prepareTransaction(session);
+PrepareHelpers.prepareTransaction(session);
- jsTestLog("Shutting down the set with the transaction still in prepare state");
- // Skip validation during ReplSetTest cleanup since validate() will block behind the prepared
- // transaction's locks when trying to take a collection X lock.
- replTest.stopSet(null /*signal*/, false /*forRestart*/, {skipValidation: true});
+jsTestLog("Shutting down the set with the transaction still in prepare state");
+// Skip validation during ReplSetTest cleanup since validate() will block behind the prepared
+// transaction's locks when trying to take a collection X lock.
+replTest.stopSet(null /*signal*/, false /*forRestart*/, {skipValidation: true});
}());
diff --git a/jstests/replsets/sized_zero_capped.js b/jstests/replsets/sized_zero_capped.js
index 41debd6d17c..91aaacdda40 100644
--- a/jstests/replsets/sized_zero_capped.js
+++ b/jstests/replsets/sized_zero_capped.js
@@ -2,28 +2,28 @@
// SECONDARY to crash. (see SERVER-18792)
(function() {
- "use strict";
+"use strict";
- var name = "sized_zero_capped";
- var replTest = new ReplSetTest({name: name, nodes: 3});
- var nodes = replTest.nodeList();
- replTest.startSet();
- replTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], priority: 3},
- {"_id": 1, "host": nodes[1], priority: 0},
- {"_id": 2, "host": nodes[2], priority: 0}
- ]
- });
+var name = "sized_zero_capped";
+var replTest = new ReplSetTest({name: name, nodes: 3});
+var nodes = replTest.nodeList();
+replTest.startSet();
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], priority: 0}
+ ]
+});
- var testDB = replTest.getPrimary().getDB(name);
- testDB.createCollection(name, {capped: true, size: 0});
- replTest.awaitReplication();
+var testDB = replTest.getPrimary().getDB(name);
+testDB.createCollection(name, {capped: true, size: 0});
+replTest.awaitReplication();
- // ensure secondary is still up and responsive
- var secondary = replTest.getSecondary();
- assert.commandWorked(secondary.getDB(name).runCommand({ping: 1}));
+// ensure secondary is still up and responsive
+var secondary = replTest.getSecondary();
+assert.commandWorked(secondary.getDB(name).runCommand({ping: 1}));
- replTest.stopSet();
+replTest.stopSet();
}());
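The regression guarded here (SERVER-18792) is that replicating the creation of a zero-sized capped collection crashed secondaries; the ping is simply a liveness probe. A compact sketch of that probe, assuming a running three-node set named replTest:

// Create the suspect collection on the primary, let it replicate, then
// confirm the secondary still responds to commands.
replTest.getPrimary().getDB("test").createCollection("c", {capped: true, size: 0});
replTest.awaitReplication();
assert.commandWorked(replTest.getSecondary().getDB("test").runCommand({ping: 1}));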
diff --git a/jstests/replsets/slave_delay_clean_shutdown.js b/jstests/replsets/slave_delay_clean_shutdown.js
index 80371048b6b..2d3e75824da 100644
--- a/jstests/replsets/slave_delay_clean_shutdown.js
+++ b/jstests/replsets/slave_delay_clean_shutdown.js
@@ -3,64 +3,64 @@
// @tags: [requires_persistence]
load('jstests/replsets/rslib.js');
(function() {
- "use strict";
+"use strict";
- // Skip db hash check since secondary has slave delay.
- TestData.skipCheckDBHashes = true;
+// Skip the db hash check since the secondary has a slave delay.
+TestData.skipCheckDBHashes = true;
- var ns = "test.coll";
+var ns = "test.coll";
- var rst = new ReplSetTest({
- nodes: 2,
- });
+var rst = new ReplSetTest({
+ nodes: 2,
+});
- var conf = rst.getReplSetConfig();
- conf.members[1].votes = 0;
- conf.members[1].priority = 0;
- conf.members[1].hidden = true;
- conf.members[1].slaveDelay = 0; // Set later.
+var conf = rst.getReplSetConfig();
+conf.members[1].votes = 0;
+conf.members[1].priority = 0;
+conf.members[1].hidden = true;
+conf.members[1].slaveDelay = 0; // Set later.
- rst.startSet();
- rst.initiate(conf);
+rst.startSet();
+rst.initiate(conf);
- var master = rst.getPrimary(); // Waits for PRIMARY state.
+var master = rst.getPrimary(); // Waits for PRIMARY state.
- // Push some ops through before setting slave delay.
- assert.writeOK(master.getCollection(ns).insert([{}, {}, {}], {writeConcern: {w: 2}}));
+// Push some ops through before setting slave delay.
+assert.writeOK(master.getCollection(ns).insert([{}, {}, {}], {writeConcern: {w: 2}}));
- // Set slaveDelay and wait for secondary to receive the change.
- conf = rst.getReplSetConfigFromNode();
- conf.version++;
- conf.members[1].slaveDelay = 24 * 60 * 60;
- reconfig(rst, conf);
- assert.soon(() => rst.getReplSetConfigFromNode(1).members[1].slaveDelay > 0,
- () => rst.getReplSetConfigFromNode(1));
+// Set slaveDelay and wait for secondary to receive the change.
+conf = rst.getReplSetConfigFromNode();
+conf.version++;
+conf.members[1].slaveDelay = 24 * 60 * 60;
+reconfig(rst, conf);
+assert.soon(() => rst.getReplSetConfigFromNode(1).members[1].slaveDelay > 0,
+ () => rst.getReplSetConfigFromNode(1));
- sleep(2000); // The secondary apply loop only checks for slaveDelay changes once per second.
- var secondary = rst.getSecondary();
- const lastOp = getLatestOp(secondary);
+sleep(2000); // The secondary apply loop only checks for slaveDelay changes once per second.
+var secondary = rst.getSecondary();
+const lastOp = getLatestOp(secondary);
- assert.writeOK(master.getCollection(ns).insert([{}, {}, {}]));
- assert.soon(() => secondary.adminCommand('serverStatus').metrics.repl.buffer.count > 0,
- () => secondary.adminCommand('serverStatus').metrics.repl);
- assert.neq(getLatestOp(master), lastOp);
- assert.eq(getLatestOp(secondary), lastOp);
+assert.writeOK(master.getCollection(ns).insert([{}, {}, {}]));
+assert.soon(() => secondary.adminCommand('serverStatus').metrics.repl.buffer.count > 0,
+ () => secondary.adminCommand('serverStatus').metrics.repl);
+assert.neq(getLatestOp(master), lastOp);
+assert.eq(getLatestOp(secondary), lastOp);
- sleep(2000); // Prevent the test from passing by chance.
- assert.eq(getLatestOp(secondary), lastOp);
+sleep(2000); // Prevent the test from passing by chance.
+assert.eq(getLatestOp(secondary), lastOp);
- // Make sure shutdown won't take a long time due to I/O.
- secondary.adminCommand('fsync');
+// Make sure shutdown won't take a long time due to I/O.
+secondary.adminCommand('fsync');
- // Shutting down shouldn't take long.
- assert.lt(Date.timeFunc(() => rst.stop(1)), 60 * 1000);
+// Shutting down shouldn't take long.
+assert.lt(Date.timeFunc(() => rst.stop(1)), 60 * 1000);
- secondary = rst.restart(1);
- rst.awaitSecondaryNodes();
+secondary = rst.restart(1);
+rst.awaitSecondaryNodes();
- assert.eq(getLatestOp(secondary), lastOp);
- sleep(2000); // Prevent the test from passing by chance.
- assert.eq(getLatestOp(secondary), lastOp);
+assert.eq(getLatestOp(secondary), lastOp);
+sleep(2000); // Prevent the test from passing by chance.
+assert.eq(getLatestOp(secondary), lastOp);
- rst.stopSet();
+rst.stopSet();
})();
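Two details in this test are easy to miss: a reconfiguration must bump the config version, and the secondary's apply loop only notices a slaveDelay change about once per second, hence the sleeps. A condensed sketch of the reconfig step, assuming rslib.js is loaded and `rst` is a running set:

// Fetch the live config, bump its version, set a one-day delay on member 1,
// and push it with the rslib reconfig() helper.
var conf = rst.getReplSetConfigFromNode();
conf.version++;
conf.members[1].slaveDelay = 24 * 60 * 60;  // Seconds.
reconfig(rst, conf);
// Wait until the delayed member reports the new config.
assert.soon(() => rst.getReplSetConfigFromNode(1).members[1].slaveDelay > 0);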
diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js
index 4a356eb62da..98dc46bacc4 100644
--- a/jstests/replsets/slavedelay1.js
+++ b/jstests/replsets/slavedelay1.js
@@ -1,7 +1,6 @@
load("jstests/replsets/rslib.js");
doTest = function(signal) {
-
var name = "slaveDelay";
var host = getHostName();
diff --git a/jstests/replsets/slaveok_read_pref.js b/jstests/replsets/slaveok_read_pref.js
index cb923324741..78b0139adfd 100644
--- a/jstests/replsets/slaveok_read_pref.js
+++ b/jstests/replsets/slaveok_read_pref.js
@@ -1,55 +1,53 @@
// Test that slaveOk is implicitly allowed for queries on a secondary with a read preference other
// than 'primary', and that queries which do have 'primary' read preference fail.
(function() {
- "use strict";
-
- const readPrefs =
- [undefined, "primary", "secondary", "primaryPreferred", "secondaryPreferred", "nearest"];
-
- const rst = new ReplSetTest({nodes: 3});
- rst.startSet();
-
- const nodes = rst.nodeList();
- rst.initiate({
- _id: jsTestName(),
- members: [
- {_id: 0, host: nodes[0]},
- {_id: 1, host: nodes[1], priority: 0},
- {_id: 2, host: nodes[2], arbiterOnly: true}
- ]
- });
-
- const priDB = rst.getPrimary().getDB(jsTestName());
- assert(priDB.dropDatabase());
-
- assert.commandWorked(priDB.test.insert({a: 1}, {writeConcern: {w: "majority"}}));
-
- const secDB = rst.getSecondary().getDB(jsTestName());
-
- for (let readMode of["commands", "legacy"]) {
- for (let readPref of readPrefs) {
- for (let slaveOk of[true, false]) {
- const testType = {readMode: readMode, readPref: readPref, slaveOk: slaveOk};
-
- secDB.getMongo().forceReadMode(readMode);
- secDB.getMongo().setSlaveOk(slaveOk);
-
- const cursor =
- (readPref ? secDB.test.find().readPref(readPref) : secDB.test.find());
-
- if (readPref === "primary" || (!readPref && !slaveOk)) {
- // Attempting to run the query throws an error of type NotMasterNoSlaveOk.
- const slaveOkErr = assert.throws(() => cursor.itcount(), [], tojson(testType));
- assert.commandFailedWithCode(slaveOkErr, ErrorCodes.NotMasterNoSlaveOk);
- } else {
- // Succeeds for all non-primary readPrefs, and for no readPref iff slaveOk.
- const docCount =
- assert.doesNotThrow(() => cursor.itcount(), [], tojson(testType));
- assert.eq(docCount, 1);
- }
+"use strict";
+
+const readPrefs =
+ [undefined, "primary", "secondary", "primaryPreferred", "secondaryPreferred", "nearest"];
+
+const rst = new ReplSetTest({nodes: 3});
+rst.startSet();
+
+const nodes = rst.nodeList();
+rst.initiate({
+ _id: jsTestName(),
+ members: [
+ {_id: 0, host: nodes[0]},
+ {_id: 1, host: nodes[1], priority: 0},
+ {_id: 2, host: nodes[2], arbiterOnly: true}
+ ]
+});
+
+const priDB = rst.getPrimary().getDB(jsTestName());
+assert(priDB.dropDatabase());
+
+assert.commandWorked(priDB.test.insert({a: 1}, {writeConcern: {w: "majority"}}));
+
+const secDB = rst.getSecondary().getDB(jsTestName());
+
+for (let readMode of ["commands", "legacy"]) {
+ for (let readPref of readPrefs) {
+ for (let slaveOk of [true, false]) {
+ const testType = {readMode: readMode, readPref: readPref, slaveOk: slaveOk};
+
+ secDB.getMongo().forceReadMode(readMode);
+ secDB.getMongo().setSlaveOk(slaveOk);
+
+ const cursor = (readPref ? secDB.test.find().readPref(readPref) : secDB.test.find());
+
+ if (readPref === "primary" || (!readPref && !slaveOk)) {
+ // Attempting to run the query throws an error of type NotMasterNoSlaveOk.
+ const slaveOkErr = assert.throws(() => cursor.itcount(), [], tojson(testType));
+ assert.commandFailedWithCode(slaveOkErr, ErrorCodes.NotMasterNoSlaveOk);
+ } else {
+                // Succeeds for all non-primary readPrefs; with no readPref, succeeds iff
+                // slaveOk is true.
+ const docCount = assert.doesNotThrow(() => cursor.itcount(), [], tojson(testType));
+ assert.eq(docCount, 1);
}
}
}
+}
- rst.stopSet();
+rst.stopSet();
})();
\ No newline at end of file
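The loop's expected-failure rule is worth stating on its own: a secondary read is rejected with NotMasterNoSlaveOk exactly when the read preference is 'primary', or when no read preference is given and slaveOk is off. As a standalone predicate (a hypothetical helper, not part of the test):

// Returns true when a query against a secondary should fail with
// ErrorCodes.NotMasterNoSlaveOk under the given settings.
function expectsNotMasterNoSlaveOk(readPref, slaveOk) {
    return readPref === "primary" || (!readPref && !slaveOk);
}
assert(expectsNotMasterNoSlaveOk("primary", true));    // Explicit 'primary' always fails.
assert(expectsNotMasterNoSlaveOk(undefined, false));   // No readPref, slaveOk off: fails.
assert(!expectsNotMasterNoSlaveOk(undefined, true));   // No readPref, slaveOk on: succeeds.
assert(!expectsNotMasterNoSlaveOk("nearest", false));  // Non-primary readPref: succeeds.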
diff --git a/jstests/replsets/speculative_majority_find.js b/jstests/replsets/speculative_majority_find.js
index fecfbf5dea1..94463145b7e 100644
--- a/jstests/replsets/speculative_majority_find.js
+++ b/jstests/replsets/speculative_majority_find.js
@@ -10,148 +10,148 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
- load("jstests/libs/parallelTester.js"); // for ScopedThread.
-
- let name = "speculative_majority_find";
- let replTest = new ReplSetTest({
- name: name,
- nodes: [{}, {rsConfig: {priority: 0}}],
- nodeOptions: {enableMajorityReadConcern: 'false'}
- });
- replTest.startSet();
- replTest.initiate();
-
- let dbName = name;
- let collName = "coll";
-
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- let primaryDB = primary.getDB(dbName);
- let secondaryDB = secondary.getDB(dbName);
- let primaryColl = primaryDB[collName];
- // Create a collection.
- assert.commandWorked(primaryColl.insert({}, {writeConcern: {w: "majority"}}));
-
- //
- // Test basic reads with speculative majority.
- //
-
- // Pause replication on the secondary so that writes won't majority commit.
- stopServerReplication(secondary);
- assert.commandWorked(primaryColl.insert({_id: 1}));
-
- jsTestLog("Do a speculative majority read that should time out.");
- let res = primaryDB.runCommand({
+"use strict";
+
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+load("jstests/libs/parallelTester.js"); // for ScopedThread.
+
+let name = "speculative_majority_find";
+let replTest = new ReplSetTest({
+ name: name,
+ nodes: [{}, {rsConfig: {priority: 0}}],
+ nodeOptions: {enableMajorityReadConcern: 'false'}
+});
+replTest.startSet();
+replTest.initiate();
+
+let dbName = name;
+let collName = "coll";
+
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+let primaryDB = primary.getDB(dbName);
+let secondaryDB = secondary.getDB(dbName);
+let primaryColl = primaryDB[collName];
+// Create a collection.
+assert.commandWorked(primaryColl.insert({}, {writeConcern: {w: "majority"}}));
+
+//
+// Test basic reads with speculative majority.
+//
+
+// Pause replication on the secondary so that writes won't majority commit.
+stopServerReplication(secondary);
+assert.commandWorked(primaryColl.insert({_id: 1}));
+
+jsTestLog("Do a speculative majority read that should time out.");
+let res = primaryDB.runCommand({
+ find: collName,
+ readConcern: {level: "majority"},
+ filter: {_id: 1},
+ allowSpeculativeMajorityRead: true,
+ maxTimeMS: 5000
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+restartServerReplication(secondary);
+replTest.awaitReplication();
+
+jsTestLog("Do a speculative majority read that should succeed.");
+res = primaryDB.runCommand({
+ find: collName,
+ readConcern: {level: "majority"},
+ filter: {_id: 1},
+ allowSpeculativeMajorityRead: true
+});
+assert.commandWorked(res);
+assert.eq(res.cursor.firstBatch.length, 1);
+assert.eq(res.cursor.firstBatch[0], {_id: 1});
+
+//
+// Test that blocked reads can succeed when a write majority commits.
+//
+
+// Pause replication on the secondary so that writes won't majority commit.
+stopServerReplication(secondary);
+assert.commandWorked(primaryColl.insert({_id: 2}));
+
+jsTestLog("Do a speculative majority that should block until write commits.");
+let speculativeRead = new ScopedThread(function(host, dbName, collName) {
+ const nodeDB = new Mongo(host).getDB(dbName);
+ return nodeDB.runCommand({
find: collName,
readConcern: {level: "majority"},
- filter: {_id: 1},
- allowSpeculativeMajorityRead: true,
- maxTimeMS: 5000
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- restartServerReplication(secondary);
- replTest.awaitReplication();
-
- jsTestLog("Do a speculative majority read that should succeed.");
- res = primaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority"},
- filter: {_id: 1},
- allowSpeculativeMajorityRead: true
- });
- assert.commandWorked(res);
- assert.eq(res.cursor.firstBatch.length, 1);
- assert.eq(res.cursor.firstBatch[0], {_id: 1});
-
- //
- // Test that blocked reads can succeed when a write majority commits.
- //
-
- // Pause replication on the secondary so that writes won't majority commit.
- stopServerReplication(secondary);
- assert.commandWorked(primaryColl.insert({_id: 2}));
-
- jsTestLog("Do a speculative majority that should block until write commits.");
- let speculativeRead = new ScopedThread(function(host, dbName, collName) {
- const nodeDB = new Mongo(host).getDB(dbName);
- return nodeDB.runCommand({
- find: collName,
- readConcern: {level: "majority"},
- filter: {_id: 2},
- allowSpeculativeMajorityRead: true
- });
- }, primary.host, dbName, collName);
- speculativeRead.start();
-
- // Wait for the read to start on the server.
- assert.soon(() => primaryDB.currentOp({ns: primaryColl.getFullName(), "command.find": collName})
- .inprog.length === 1);
-
- // Let the previous write commit.
- restartServerReplication(secondary);
- assert.commandWorked(
- primaryColl.insert({_id: "commit_last_write"}, {writeConcern: {w: "majority"}}));
-
- // Make sure the read finished and returned correct results.
- speculativeRead.join();
- res = speculativeRead.returnData();
- assert.commandWorked(res);
- assert.eq(res.cursor.firstBatch.length, 1);
- assert.eq(res.cursor.firstBatch[0], {_id: 2});
-
- //
- // Test 'afterClusterTime' reads with speculative majority.
- //
- stopServerReplication(secondary);
-
- // Insert a document on the primary and record the response.
- let writeRes = primaryDB.runCommand({insert: collName, documents: [{_id: 3}]});
- assert.commandWorked(writeRes);
-
- jsTestLog(
- "Do a speculative majority read on primary with 'afterClusterTime' that should time out.");
- res = primaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
- filter: {_id: 3},
- $clusterTime: writeRes.$clusterTime,
- allowSpeculativeMajorityRead: true,
- maxTimeMS: 5000
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog(
- "Do a speculative majority read on secondary with 'afterClusterTime' that should time out.");
- res = secondaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
- filter: {_id: 3},
- $clusterTime: writeRes.$clusterTime,
- allowSpeculativeMajorityRead: true,
- maxTimeMS: 5000
- });
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
-
- // Let the previous write majority commit.
- restartServerReplication(secondary);
- replTest.awaitReplication();
-
- jsTestLog("Do a speculative majority read with 'afterClusterTime' that should succeed.");
- res = primaryDB.runCommand({
- find: collName,
- readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
- filter: {_id: 3},
- $clusterTime: res.$clusterTime,
+ filter: {_id: 2},
allowSpeculativeMajorityRead: true
});
- assert.commandWorked(res);
- assert.eq(res.cursor.firstBatch.length, 1);
- assert.eq(res.cursor.firstBatch[0], {_id: 3});
-
- replTest.stopSet();
+}, primary.host, dbName, collName);
+speculativeRead.start();
+
+// Wait for the read to start on the server.
+assert.soon(() => primaryDB.currentOp({ns: primaryColl.getFullName(), "command.find": collName})
+ .inprog.length === 1);
+
+// Let the previous write commit.
+restartServerReplication(secondary);
+assert.commandWorked(
+ primaryColl.insert({_id: "commit_last_write"}, {writeConcern: {w: "majority"}}));
+
+// Make sure the read finished and returned correct results.
+speculativeRead.join();
+res = speculativeRead.returnData();
+assert.commandWorked(res);
+assert.eq(res.cursor.firstBatch.length, 1);
+assert.eq(res.cursor.firstBatch[0], {_id: 2});
+
+//
+// Test 'afterClusterTime' reads with speculative majority.
+//
+stopServerReplication(secondary);
+
+// Insert a document on the primary and record the response.
+let writeRes = primaryDB.runCommand({insert: collName, documents: [{_id: 3}]});
+assert.commandWorked(writeRes);
+
+jsTestLog(
+ "Do a speculative majority read on primary with 'afterClusterTime' that should time out.");
+res = primaryDB.runCommand({
+ find: collName,
+ readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
+ filter: {_id: 3},
+ $clusterTime: writeRes.$clusterTime,
+ allowSpeculativeMajorityRead: true,
+ maxTimeMS: 5000
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+jsTestLog(
+ "Do a speculative majority read on secondary with 'afterClusterTime' that should time out.");
+res = secondaryDB.runCommand({
+ find: collName,
+ readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
+ filter: {_id: 3},
+ $clusterTime: writeRes.$clusterTime,
+ allowSpeculativeMajorityRead: true,
+ maxTimeMS: 5000
+});
+assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+
+// Let the previous write majority commit.
+restartServerReplication(secondary);
+replTest.awaitReplication();
+
+jsTestLog("Do a speculative majority read with 'afterClusterTime' that should succeed.");
+res = primaryDB.runCommand({
+ find: collName,
+ readConcern: {level: "majority", afterClusterTime: writeRes.operationTime},
+ filter: {_id: 3},
+ $clusterTime: res.$clusterTime,
+ allowSpeculativeMajorityRead: true
+});
+assert.commandWorked(res);
+assert.eq(res.cursor.firstBatch.length, 1);
+assert.eq(res.cursor.firstBatch[0], {_id: 3});
+
+replTest.stopSet();
})();
\ No newline at end of file
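For reference, the command shape this test exercises: with enableMajorityReadConcern=false, a majority read on find must opt in via allowSpeculativeMajorityRead, and maxTimeMS bounds how long the command waits for the data it read to majority-commit. A sketch, assuming `db` is a handle to the primary's test database:

// Speculative majority read: returns once the read data majority-commits,
// or fails with MaxTimeMSExpired if the commit point does not advance.
let res = db.runCommand({
    find: "coll",
    filter: {_id: 1},
    readConcern: {level: "majority"},
    allowSpeculativeMajorityRead: true,
    maxTimeMS: 5000
});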
diff --git a/jstests/replsets/speculative_majority_supported_commands.js b/jstests/replsets/speculative_majority_supported_commands.js
index 26fd40aa244..7c1bbdb2434 100644
--- a/jstests/replsets/speculative_majority_supported_commands.js
+++ b/jstests/replsets/speculative_majority_supported_commands.js
@@ -7,70 +7,70 @@
* @tags: [uses_speculative_majority]
*/
(function() {
- "use strict";
+"use strict";
- let name = "speculative_majority_supported_commands";
- let replTest =
- new ReplSetTest({name: name, nodes: 1, nodeOptions: {enableMajorityReadConcern: 'false'}});
- replTest.startSet();
- replTest.initiate();
+let name = "speculative_majority_supported_commands";
+let replTest =
+ new ReplSetTest({name: name, nodes: 1, nodeOptions: {enableMajorityReadConcern: 'false'}});
+replTest.startSet();
+replTest.initiate();
- let dbName = name;
- let collName = "coll";
+let dbName = name;
+let collName = "coll";
- let primary = replTest.getPrimary();
- let primaryDB = primary.getDB(dbName);
+let primary = replTest.getPrimary();
+let primaryDB = primary.getDB(dbName);
- // Create a collection.
- assert.commandWorked(primaryDB[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
+// Create a collection.
+assert.commandWorked(primaryDB[collName].insert({_id: 0}, {writeConcern: {w: "majority"}}));
- /**
- * Allowed commands.
- */
+/**
+ * Allowed commands.
+ */
- // Change stream aggregation is allowed.
- let res = primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$changeStream: {}}],
- cursor: {},
- readConcern: {level: "majority"}
- });
- assert.commandWorked(res);
+// Change stream aggregation is allowed.
+let res = primaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ readConcern: {level: "majority"}
+});
+assert.commandWorked(res);
- // Find query with speculative flag is allowed.
- res = primaryDB.runCommand(
- {find: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: true});
- assert.commandWorked(res);
+// Find query with speculative flag is allowed.
+res = primaryDB.runCommand(
+ {find: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: true});
+assert.commandWorked(res);
- /**
- * Disallowed commands.
- */
+/**
+ * Disallowed commands.
+ */
- // A non change stream aggregation is not allowed.
- res = primaryDB.runCommand({
- aggregate: collName,
- pipeline: [{$project: {}}],
- cursor: {},
- readConcern: {level: "majority"}
- });
- assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
+// A non change stream aggregation is not allowed.
+res = primaryDB.runCommand({
+ aggregate: collName,
+ pipeline: [{$project: {}}],
+ cursor: {},
+ readConcern: {level: "majority"}
+});
+assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
- // The 'find' command without requisite flag is unsupported.
- res = primaryDB.runCommand({find: collName, readConcern: {level: "majority"}});
- assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
+// The 'find' command without requisite flag is unsupported.
+res = primaryDB.runCommand({find: collName, readConcern: {level: "majority"}});
+assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
- res = primaryDB.runCommand(
- {find: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: false});
- assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
+res = primaryDB.runCommand(
+ {find: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: false});
+assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
- // Another basic read command. We don't exhaustively check all commands.
- res = primaryDB.runCommand({count: collName, readConcern: {level: "majority"}});
- assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
+// Another basic read command. We don't exhaustively check all commands.
+res = primaryDB.runCommand({count: collName, readConcern: {level: "majority"}});
+assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
- // Speculative flag is only allowed on find commands.
- res = primaryDB.runCommand(
- {count: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: true});
- assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
+// Speculative flag is only allowed on find commands.
+res = primaryDB.runCommand(
+ {count: collName, readConcern: {level: "majority"}, allowSpeculativeMajorityRead: true});
+assert.commandFailedWithCode(res, ErrorCodes.ReadConcernMajorityNotEnabled);
- replTest.stopSet();
+replTest.stopSet();
})();
\ No newline at end of file
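Condensed, the support matrix this test checks: only the two command shapes below accept readConcern level "majority" when majority read concern is disabled; everything else fails with ReadConcernMajorityNotEnabled. A sketch, assuming `db` is the primary's database handle:

// Allowed: change stream aggregation.
db.runCommand({aggregate: "coll", pipeline: [{$changeStream: {}}], cursor: {},
               readConcern: {level: "majority"}});
// Allowed: find with the speculative flag explicitly set to true.
db.runCommand({find: "coll", readConcern: {level: "majority"},
               allowSpeculativeMajorityRead: true});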
diff --git a/jstests/replsets/speculative_read_transaction.js b/jstests/replsets/speculative_read_transaction.js
index 5ed70ff05e8..a7a8902d02e 100644
--- a/jstests/replsets/speculative_read_transaction.js
+++ b/jstests/replsets/speculative_read_transaction.js
@@ -5,105 +5,105 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
- load("jstests/libs/write_concern_util.js"); // For stopServerReplication
-
- const dbName = "test";
- const collName = "speculative_read_transaction";
-
- const rst = new ReplSetTest({name: collName, nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const testDB = primary.getDB(dbName);
- const coll = testDB[collName];
-
- function runTest(sessionOptions) {
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
-
- // Do an initial write so we have something to update.
- assert.commandWorked(coll.insert([{_id: 0, x: 0}], {w: "majority"}));
- rst.awaitLastOpCommitted();
-
- // Stop replication on the secondary so the majority commit never moves forward.
- stopServerReplication(secondary);
-
- // Do a local update in another client.
- // The transaction should see this, due to speculative behavior.
- const otherclient = new Mongo(primary.host);
- assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 1}, {w: 1}));
-
- // Initiate a session on the primary.
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
-
- // Abort does not wait for write concern.
- jsTestLog("Starting majority-abort transaction");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
- assert.commandWorked(session.abortTransaction_forTesting());
-
- // This transaction should complete because it does not use majority write concern.
- jsTestLog("Starting non-majority commit transaction");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // This transaction should not complete because it uses snapshot read concern, majority
- // write concern and the commit point is not advancing.
- jsTestLog("Starting majority-commit snapshot-read transaction");
- session.startTransaction(
- {readConcern: {level: "snapshot"}, writeConcern: {w: "majority", wtimeout: 5000}});
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- // Allow the majority commit point to advance to allow the failed write concern to clear.
- restartServerReplication(secondary);
- rst.awaitReplication();
- stopServerReplication(secondary);
-
- // Do another local update from another client
- assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 2}, {w: 1}));
-
- // This transaction should not complete because it uses local read concern upconverted to
- // snapshot.
- // TODO(SERVER-34881): Once default read concern is speculative majority, local read
- // concern should not wait for the majority commit point to advance.
- jsTestLog("Starting majority-commit local-read transaction");
- session.startTransaction(
- {readConcern: {level: "local"}, writeConcern: {w: "majority", wtimeout: 5000}});
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 2});
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- // Allow the majority commit point to advance to allow the failed write concern to clear.
- restartServerReplication(secondary);
- rst.awaitReplication();
- stopServerReplication(secondary);
-
- // Do another local update from another client
- assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 3}, {w: 1}));
-
- // This transaction should not complete because it uses majority read concern, majority
- // write concern, and the commit point is not advancing.
- jsTestLog("Starting majority-commit majority-read transaction");
- session.startTransaction(
- {readConcern: {level: "majority"}, writeConcern: {w: "majority", wtimeout: 5000}});
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 3});
- assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- // Restart server replication to allow majority commit point to advance.
- restartServerReplication(secondary);
-
- session.endSession();
- }
- runTest({causalConsistency: false});
- runTest({causalConsistency: true});
-
- rst.stopSet();
+"use strict";
+load("jstests/libs/write_concern_util.js"); // For stopServerReplication
+
+const dbName = "test";
+const collName = "speculative_read_transaction";
+
+const rst = new ReplSetTest({name: collName, nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const testDB = primary.getDB(dbName);
+const coll = testDB[collName];
+
+function runTest(sessionOptions) {
+ testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+
+ // Do an initial write so we have something to update.
+ assert.commandWorked(coll.insert([{_id: 0, x: 0}], {w: "majority"}));
+ rst.awaitLastOpCommitted();
+
+ // Stop replication on the secondary so the majority commit never moves forward.
+ stopServerReplication(secondary);
+
+ // Do a local update in another client.
+ // The transaction should see this, due to speculative behavior.
+ const otherclient = new Mongo(primary.host);
+ assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 1}, {w: 1}));
+
+ // Initiate a session on the primary.
+ const session = testDB.getMongo().startSession(sessionOptions);
+ const sessionDb = session.getDatabase(dbName);
+ const sessionColl = sessionDb.getCollection(collName);
+
+ // Abort does not wait for write concern.
+ jsTestLog("Starting majority-abort transaction");
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
+ assert.commandWorked(session.abortTransaction_forTesting());
+
+ // This transaction should complete because it does not use majority write concern.
+ jsTestLog("Starting non-majority commit transaction");
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ // This transaction should not complete because it uses snapshot read concern, majority
+ // write concern and the commit point is not advancing.
+ jsTestLog("Starting majority-commit snapshot-read transaction");
+ session.startTransaction(
+ {readConcern: {level: "snapshot"}, writeConcern: {w: "majority", wtimeout: 5000}});
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ // Allow the majority commit point to advance to allow the failed write concern to clear.
+ restartServerReplication(secondary);
+ rst.awaitReplication();
+ stopServerReplication(secondary);
+
+ // Do another local update from another client
+ assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 2}, {w: 1}));
+
+ // This transaction should not complete because it uses local read concern upconverted to
+ // snapshot.
+ // TODO(SERVER-34881): Once default read concern is speculative majority, local read
+ // concern should not wait for the majority commit point to advance.
+ jsTestLog("Starting majority-commit local-read transaction");
+ session.startTransaction(
+ {readConcern: {level: "local"}, writeConcern: {w: "majority", wtimeout: 5000}});
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 2});
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ // Allow the majority commit point to advance to allow the failed write concern to clear.
+ restartServerReplication(secondary);
+ rst.awaitReplication();
+ stopServerReplication(secondary);
+
+ // Do another local update from another client
+ assert.commandWorked(otherclient.getDB(dbName)[collName].update({_id: 0}, {x: 3}, {w: 1}));
+
+ // This transaction should not complete because it uses majority read concern, majority
+ // write concern, and the commit point is not advancing.
+ jsTestLog("Starting majority-commit majority-read transaction");
+ session.startTransaction(
+ {readConcern: {level: "majority"}, writeConcern: {w: "majority", wtimeout: 5000}});
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 3});
+ assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ // Restart server replication to allow majority commit point to advance.
+ restartServerReplication(secondary);
+
+ session.endSession();
+}
+runTest({causalConsistency: false});
+runTest({causalConsistency: true});
+
+rst.stopSet();
}());
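The recurring pattern in runTest is: stop replication so the majority commit point stalls, run a transaction, and expect the commit to fail its write concern once wtimeout elapses. A minimal sketch, assuming write_concern_util.js is loaded and the session and secondary come from a setup like the one above:

// With replication stopped, a majority-write-concern commit cannot satisfy
// its write concern and fails once the 5 second wtimeout elapses.
stopServerReplication(secondary);
session.startTransaction(
    {readConcern: {level: "snapshot"}, writeConcern: {w: "majority", wtimeout: 5000}});
assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
assert.commandFailedWithCode(session.commitTransaction_forTesting(),
                             ErrorCodes.WriteConcernFailed);
restartServerReplication(secondary);  // Let the commit point advance again.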
diff --git a/jstests/replsets/speculative_transaction.js b/jstests/replsets/speculative_transaction.js
index 565b41c8300..e138612dcd1 100644
--- a/jstests/replsets/speculative_transaction.js
+++ b/jstests/replsets/speculative_transaction.js
@@ -6,120 +6,120 @@
* @tags: [uses_transactions, requires_majority_read_concern]
*/
(function() {
- "use strict";
- load("jstests/libs/write_concern_util.js"); // For stopServerReplication
+"use strict";
+load("jstests/libs/write_concern_util.js"); // For stopServerReplication
- const dbName = "test";
- const collName = "speculative_transaction";
+const dbName = "test";
+const collName = "speculative_transaction";
- const rst = new ReplSetTest({name: collName, nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
+const rst = new ReplSetTest({name: collName, nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- var testDB = primary.getDB(dbName);
- const coll = testDB[collName];
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+var testDB = primary.getDB(dbName);
+const coll = testDB[collName];
- function runTest(sessionOptions) {
- testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
+function runTest(sessionOptions) {
+ testDB.runCommand({drop: collName, writeConcern: {w: "majority"}});
- // Do an initial write so we have something to update.
- assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}], {w: "majority"}));
- rst.awaitLastOpCommitted();
+ // Do an initial write so we have something to update.
+ assert.commandWorked(coll.insert([{_id: 0}, {_id: 1}], {w: "majority"}));
+ rst.awaitLastOpCommitted();
- // Stop replication on the secondary so the majority commit never moves forward.
- stopServerReplication(secondary);
+ // Stop replication on the secondary so the majority commit never moves forward.
+ stopServerReplication(secondary);
- // Initiate a session on the primary.
- const session = testDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb.getCollection(collName);
+ // Initiate a session on the primary.
+ const session = testDB.getMongo().startSession(sessionOptions);
+ const sessionDb = session.getDatabase(dbName);
+ const sessionColl = sessionDb.getCollection(collName);
- // Start the first transaction. Do not use majority commit for this one.
- jsTestLog("Starting first transaction");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
+ // Start the first transaction. Do not use majority commit for this one.
+ jsTestLog("Starting first transaction");
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {x: 1}}));
+ assert.commandWorked(sessionColl.update({_id: 0}, {$set: {x: 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+ assert.commandWorked(session.commitTransaction_forTesting());
- // The document should be updated on the local snapshot.
- assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 1});
+ // The document should be updated on the local snapshot.
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 1});
- // The document should not be updated in the majority snapshot.
- assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0});
+ // The document should not be updated in the majority snapshot.
+ assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0});
- jsTestLog("Starting second transaction");
- // Start a second transaction. Still do not use majority commit for this one.
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
+ jsTestLog("Starting second transaction");
+ // Start a second transaction. Still do not use majority commit for this one.
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
- // We should see the updated doc within the transaction as a result of speculative read
- // concern.
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
+ // We should see the updated doc within the transaction as a result of speculative read
+ // concern.
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 1});
- // Update it again.
- assert.commandWorked(sessionColl.update({_id: 0}, {$inc: {x: 1}}));
+ // Update it again.
+ assert.commandWorked(sessionColl.update({_id: 0}, {$inc: {x: 1}}));
- // Update a different document outside the transaction.
- assert.commandWorked(coll.update({_id: 1}, {$set: {y: 1}}));
+ // Update a different document outside the transaction.
+ assert.commandWorked(coll.update({_id: 1}, {$set: {y: 1}}));
- // Within the transaction, we should not see the out-of-transaction update.
- assert.eq(sessionColl.findOne({_id: 1}), {_id: 1});
+ // Within the transaction, we should not see the out-of-transaction update.
+ assert.eq(sessionColl.findOne({_id: 1}), {_id: 1});
- assert.commandWorked(session.commitTransaction_forTesting());
+ assert.commandWorked(session.commitTransaction_forTesting());
- // The document should be updated on the local snapshot.
- assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 2});
+ // The document should be updated on the local snapshot.
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 2});
- // The document should not be updated in the majority snapshot.
- assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0});
+ // The document should not be updated in the majority snapshot.
+ assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0});
- // Make sure write conflicts are caught with speculative transactions.
- jsTestLog("Starting a conflicting transaction which will be auto-aborted");
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
+ // Make sure write conflicts are caught with speculative transactions.
+ jsTestLog("Starting a conflicting transaction which will be auto-aborted");
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
- // Read some data inside the transaction.
- assert.eq(sessionColl.findOne({_id: 1}), {_id: 1, y: 1});
+ // Read some data inside the transaction.
+ assert.eq(sessionColl.findOne({_id: 1}), {_id: 1, y: 1});
- // Write it outside the transaction.
- assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}}));
+ // Write it outside the transaction.
+ assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}}));
- // Can still read old data in transaction.
- assert.eq(sessionColl.findOne({_id: 1}), {_id: 1, y: 1});
+ // Can still read old data in transaction.
+ assert.eq(sessionColl.findOne({_id: 1}), {_id: 1, y: 1});
- // But update fails
- assert.commandFailedWithCode(sessionColl.update({_id: 1}, {$inc: {x: 1}}),
- ErrorCodes.WriteConflict);
+    // But the update fails with a write conflict.
+ assert.commandFailedWithCode(sessionColl.update({_id: 1}, {$inc: {x: 1}}),
+ ErrorCodes.WriteConflict);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
- // Restart server replication to allow majority commit point to advance.
- restartServerReplication(secondary);
+ // Restart server replication to allow majority commit point to advance.
+ restartServerReplication(secondary);
- jsTestLog("Starting final transaction (with majority commit)");
- // Start a third transaction, with majority commit.
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+ jsTestLog("Starting final transaction (with majority commit)");
+ // Start a third transaction, with majority commit.
+ session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- // We should see the updated doc within the transaction.
- assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 2});
+ // We should see the updated doc within the transaction.
+ assert.eq(sessionColl.findOne({_id: 0}), {_id: 0, x: 2});
- // Update it one more time.
- assert.commandWorked(sessionColl.update({_id: 0}, {$inc: {x: 1}}));
+ // Update it one more time.
+ assert.commandWorked(sessionColl.update({_id: 0}, {$inc: {x: 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
+ assert.commandWorked(session.commitTransaction_forTesting());
- // The document should be updated on the local snapshot.
- assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 3});
+ // The document should be updated on the local snapshot.
+ assert.eq(coll.findOne({_id: 0}), {_id: 0, x: 3});
- // The document should also be updated in the majority snapshot.
- assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0, x: 3});
+ // The document should also be updated in the majority snapshot.
+ assert.eq(coll.find({_id: 0}).readConcern("majority").next(), {_id: 0, x: 3});
- session.endSession();
- }
+ session.endSession();
+}
- runTest({causalConsistency: false});
- runTest({causalConsistency: true});
- rst.stopSet();
+runTest({causalConsistency: false});
+runTest({causalConsistency: true});
+rst.stopSet();
}());
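The conflict sequence in the middle of runTest is the essence of speculative snapshot isolation and is worth isolating: the transaction keeps reading its snapshot after an outside write, the conflicting in-transaction update fails, and that failure aborts the transaction, so a subsequent abort reports NoSuchTransaction. A condensed sketch using the same names as the test:

session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: 1}});
assert.eq(sessionColl.findOne({_id: 1}), {_id: 1, y: 1});     // Read inside the txn.
assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}}));  // Conflicting outside write.
assert.eq(sessionColl.findOne({_id: 1}), {_id: 1, y: 1});     // Snapshot still visible.
assert.commandFailedWithCode(sessionColl.update({_id: 1}, {$inc: {x: 1}}),
                             ErrorCodes.WriteConflict);       // In-txn write conflicts.
assert.commandFailedWithCode(session.abortTransaction_forTesting(),
                             ErrorCodes.NoSuchTransaction);   // Txn was already aborted.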
diff --git a/jstests/replsets/standalone_replication_recovery_prepare_only.js b/jstests/replsets/standalone_replication_recovery_prepare_only.js
index 4c3910244f2..3c61ff64c8e 100644
--- a/jstests/replsets/standalone_replication_recovery_prepare_only.js
+++ b/jstests/replsets/standalone_replication_recovery_prepare_only.js
@@ -7,10 +7,10 @@
* requires_majority_read_concern, uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/prepare_standalone_replication_recovery.js");
+"use strict";
+load("jstests/replsets/libs/prepare_standalone_replication_recovery.js");
- const testName = "standalone_replication_recovery_prepare_only";
+const testName = "standalone_replication_recovery_prepare_only";
- testPrepareRecoverFromOplogAsStandalone(testName, /* commitBeforeRecovery */ false);
+testPrepareRecoverFromOplogAsStandalone(testName, /* commitBeforeRecovery */ false);
})();
diff --git a/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js b/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js
index a09bbb70ecf..e3843d99056 100644
--- a/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js
+++ b/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js
@@ -7,10 +7,10 @@
* requires_majority_read_concern, uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
- load("jstests/replsets/libs/prepare_standalone_replication_recovery.js");
+"use strict";
+load("jstests/replsets/libs/prepare_standalone_replication_recovery.js");
- const testName = "standalone_replication_recovery_prepare_with_commit";
+const testName = "standalone_replication_recovery_prepare_with_commit";
- testPrepareRecoverFromOplogAsStandalone(testName, /* commitBeforeRecovery */ true);
+testPrepareRecoverFromOplogAsStandalone(testName, /* commitBeforeRecovery */ true);
})();
diff --git a/jstests/replsets/startParallelShell.js b/jstests/replsets/startParallelShell.js
index 0027c5600e3..cb1838c392f 100644
--- a/jstests/replsets/startParallelShell.js
+++ b/jstests/replsets/startParallelShell.js
@@ -3,34 +3,34 @@
var db;
(function() {
- 'use strict';
+'use strict';
- const setName = 'rs0';
- const replSet = new ReplSetTest({name: setName, nodes: 3});
- const nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate();
+const setName = 'rs0';
+const replSet = new ReplSetTest({name: setName, nodes: 3});
+const nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate();
- const url = replSet.getURL();
- print("* Connecting to " + url);
- const mongo = new Mongo(url);
- db = mongo.getDB('admin');
- assert.eq(url, mongo.host, "replSet.getURL() should match active connection string");
+const url = replSet.getURL();
+print("* Connecting to " + url);
+const mongo = new Mongo(url);
+db = mongo.getDB('admin');
+assert.eq(url, mongo.host, "replSet.getURL() should match active connection string");
- print("* Starting parallel shell on --host " + db.getMongo().host);
- var awaitShell = startParallelShell('db.coll0.insert({test: "connString only"});');
- assert.soon(function() {
- return db.coll0.find({test: "connString only"}).count() === 1;
- });
- awaitShell();
+print("* Starting parallel shell on --host " + db.getMongo().host);
+var awaitShell = startParallelShell('db.coll0.insert({test: "connString only"});');
+assert.soon(function() {
+ return db.coll0.find({test: "connString only"}).count() === 1;
+});
+awaitShell();
- const uri = new MongoURI(url);
- const port0 = uri.servers[0].port;
- print("* Starting parallel shell w/ --port " + port0);
- awaitShell = startParallelShell('db.coll0.insert({test: "explicit port"});', port0);
- assert.soon(function() {
- return db.coll0.find({test: "explicit port"}).count() === 1;
- });
- awaitShell();
- replSet.stopSet();
+const uri = new MongoURI(url);
+const port0 = uri.servers[0].port;
+print("* Starting parallel shell w/ --port " + port0);
+awaitShell = startParallelShell('db.coll0.insert({test: "explicit port"});', port0);
+assert.soon(function() {
+ return db.coll0.find({test: "explicit port"}).count() === 1;
+});
+awaitShell();
+replSet.stopSet();
})();
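startParallelShell is used here in both of its addressing modes: with no port it targets the shell's current --host, and with an explicit port it targets that server; in both cases it returns a join function. A minimal sketch (the collection and document names are illustrative):

// Run code in a parallel shell against an explicit port, then wait for the
// write to become visible before joining the shell.
const awaitShell = startParallelShell('db.work.insert({done: true});', port0);
assert.soon(function() {
    return db.work.find({done: true}).count() === 1;
});
awaitShell();  // Blocks until the parallel shell exits.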
diff --git a/jstests/replsets/startup_recovery_commit_transaction_before_stable_timestamp.js b/jstests/replsets/startup_recovery_commit_transaction_before_stable_timestamp.js
index 340ff978b78..66d3111df82 100644
--- a/jstests/replsets/startup_recovery_commit_transaction_before_stable_timestamp.js
+++ b/jstests/replsets/startup_recovery_commit_transaction_before_stable_timestamp.js
@@ -10,84 +10,84 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- let primary = replTest.getPrimary();
-
- const dbName = "test";
- const collName = "commit_transaction_recovery";
- let testDB = primary.getDB(dbName);
- const testColl = testDB.getCollection(collName);
-
- // Construct a large array such that two arrays in the same document are not greater than the
- // 16MB limit, but that three such arrays in the same document are greater than 16MB. This will
- // be helpful in recreating an idempotency issue that exists when applying the operations from
- // a transaction after the data already reflects the transaction.
- const largeArray = new Array(7 * 1024 * 1024).join('x');
- assert.commandWorked(testColl.insert([{_id: 1, "a": largeArray}]));
-
- // Start a transaction in a session that will be prepared and committed before node restart.
- let session = primary.startSession();
- let sessionDB = session.getDatabase(dbName);
- const sessionColl = sessionDB.getCollection(collName);
- session.startTransaction();
- assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"b": largeArray}}));
- assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {"b": 1}}));
- assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"c": largeArray}}));
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- const recoveryTimestamp =
- assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 2}]})).operationTime;
-
- jsTestLog("Holding back the stable timestamp to right after the prepareTimestamp");
-
- // Hold back the stable timestamp to be right after the prepareTimestamp, but before the
- // commitTransaction oplog entry so that the transaction will be replayed during startup
- // recovery.
- assert.commandWorked(testDB.adminCommand({
- "configureFailPoint": 'holdStableTimestampAtSpecificTimestamp',
- "mode": 'alwaysOn',
- "data": {"timestamp": recoveryTimestamp}
- }));
-
- jsTestLog("Committing the transaction");
-
- // Since this transaction is committed after the last snapshot, this commit oplog entry will be
- // replayed during startup replication recovery.
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- jsTestLog("Restarting node");
-
- // Perform a clean shutdown and restart. And, the data restored at the storage recovery
- // timestamp should not reflect the transaction. If not, replaying the commit oplog entry during
- // startup recovery would throw BSONTooLarge exception.
- replTest.stop(primary, undefined, {skipValidation: true});
- // Since the oldest timestamp is same as the stable timestamp during node's restart, this test
- // will commit a transaction older than oldest timestamp during startup recovery.
- replTest.start(primary, {}, true);
-
- jsTestLog("Node was restarted");
- primary = replTest.getPrimary();
-
- // Make sure that the data reflects all the operations from the transaction after recovery.
- testDB = primary.getDB(dbName);
- const res = testDB[collName].findOne({_id: 1});
- assert.eq(res, {_id: 1, "a": largeArray, "c": largeArray});
-
- // Make sure that another write on the same document from the transaction has no write conflict.
- // Also, make sure that we can run another transaction after recovery without any problems.
- session = primary.startSession();
- sessionDB = session.getDatabase(dbName);
- session.startTransaction();
- assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
- prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
-
- replTest.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+let primary = replTest.getPrimary();
+
+const dbName = "test";
+const collName = "commit_transaction_recovery";
+let testDB = primary.getDB(dbName);
+const testColl = testDB.getCollection(collName);
+
+// Construct a large array such that two copies in the same document stay under the 16MB
+// document limit, but three copies exceed it. This helps recreate an idempotency issue that
+// arises when the operations from a transaction are applied after the data already reflects
+// that transaction.
+const largeArray = new Array(7 * 1024 * 1024).join('x');
+assert.commandWorked(testColl.insert([{_id: 1, "a": largeArray}]));
+
+// Start a transaction in a session that will be prepared and committed before node restart.
+let session = primary.startSession();
+let sessionDB = session.getDatabase(dbName);
+const sessionColl = sessionDB.getCollection(collName);
+session.startTransaction();
+assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"b": largeArray}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$unset: {"b": 1}}));
+assert.commandWorked(sessionColl.update({_id: 1}, {$set: {"c": largeArray}}));
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+const recoveryTimestamp =
+ assert.commandWorked(testColl.runCommand("insert", {documents: [{_id: 2}]})).operationTime;
+
+jsTestLog("Holding back the stable timestamp to right after the prepareTimestamp");
+
+// Hold back the stable timestamp to be right after the prepareTimestamp, but before the
+// commitTransaction oplog entry so that the transaction will be replayed during startup
+// recovery.
+assert.commandWorked(testDB.adminCommand({
+ "configureFailPoint": 'holdStableTimestampAtSpecificTimestamp',
+ "mode": 'alwaysOn',
+ "data": {"timestamp": recoveryTimestamp}
+}));
+
+jsTestLog("Committing the transaction");
+
+// Since this transaction is committed after the last snapshot, this commit oplog entry will be
+// replayed during startup replication recovery.
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+jsTestLog("Restarting node");
+
+// Perform a clean shutdown and restart. The data restored at the storage recovery
+// timestamp should not reflect the transaction; otherwise, replaying the commit oplog
+// entry during startup recovery would throw a BSONTooLarge exception.
+replTest.stop(primary, undefined, {skipValidation: true});
+// Since the oldest timestamp is the same as the stable timestamp during the node's restart,
+// this test commits a transaction older than the oldest timestamp during startup recovery.
+replTest.start(primary, {}, true);
+
+jsTestLog("Node was restarted");
+primary = replTest.getPrimary();
+
+// Make sure that the data reflects all the operations from the transaction after recovery.
+testDB = primary.getDB(dbName);
+const res = testDB[collName].findOne({_id: 1});
+assert.eq(res, {_id: 1, "a": largeArray, "c": largeArray});
+
+// Make sure that another write to a document touched by the transaction does not hit a write
+// conflict, and that we can run another transaction after recovery without any problems.
+session = primary.startSession();
+sessionDB = session.getDatabase(dbName);
+session.startTransaction();
+assert.commandWorked(sessionDB[collName].update({_id: 1}, {_id: 1, a: 1}));
+prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+assert.eq(testDB[collName].findOne({_id: 1}), {_id: 1, a: 1});
+
+replTest.stopSet();
}());
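The mechanism that forces the replay is the failpoint: pinning the stable timestamp before the commitTransaction oplog entry guarantees that startup recovery must re-apply the commit. The failpoint invocation as a standalone sketch, where recoveryTimestamp is an operationTime captured before the commit, as in the test:

// Pin the stable timestamp so the subsequent transaction commit lands after
// the last stable checkpoint and is replayed during startup recovery.
assert.commandWorked(testDB.adminCommand({
    configureFailPoint: 'holdStableTimestampAtSpecificTimestamp',
    mode: 'alwaysOn',
    data: {timestamp: recoveryTimestamp}
}));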
diff --git a/jstests/replsets/startup_recovery_reconstructs_txn_prepared_before_stable_ts.js b/jstests/replsets/startup_recovery_reconstructs_txn_prepared_before_stable_ts.js
index 1a8c46d5d3d..27237cbe18f 100644
--- a/jstests/replsets/startup_recovery_reconstructs_txn_prepared_before_stable_ts.js
+++ b/jstests/replsets/startup_recovery_reconstructs_txn_prepared_before_stable_ts.js
@@ -6,110 +6,110 @@
*/
(function() {
- "use strict";
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/aggregation/extras/utils.js");
-
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
-
- let primary = replTest.getPrimary();
-
- const dbName = "test";
- const collName = "startup_recovery_reconstructs_txn_prepared_before_stable";
- const testDB = primary.getDB(dbName);
- let testColl = testDB.getCollection(collName);
-
- assert.commandWorked(testColl.insert({_id: 0}));
-
- // Start a session on the primary.
- let session = primary.startSession();
- const sessionID = session.getSessionId();
- let sessionDB = session.getDatabase(dbName);
- let sessionColl = sessionDB.getCollection(collName);
-
- // Prepare the transaction on the session.
- session.startTransaction();
- // We are creating a record size of 14MB for _id '0', just to make sure when this
- // test runs with lesser wiredTiger cache size, there would be a higher possibility
- // of this record being considered for eviction from in-memory tree. And, to confirm
- // that we don't see problems like in SERVER-40422.
- const largeArray = new Array(14 * 1024 * 1024).join('x');
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {a: largeArray}}));
- assert.commandWorked(sessionColl.insert({_id: 1}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- // Fastcount reflects the insert of a prepared transaction.
- assert.eq(testColl.count(), 2);
-
- jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
- // Doing a majority write after preparing the transaction ensures that the stable timestamp is
- // past the prepare timestamp because this write must be in the committed snapshot.
- assert.commandWorked(
- testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
-
- // Fastcount reflects the insert of a prepared transaction.
- assert.eq(testColl.count(), 3);
-
- // Check that we have one transaction in the transactions table.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
-
- jsTestLog("Restarting node");
- // Perform a clean shutdown and restart. And, the data restored at the storage recovery
- // timestamp should not reflect the prepared transaction.
- replTest.stop(primary, undefined, {skipValidation: true});
- // Since the oldest timestamp is same as the stable timestamp during node's restart, this test
- // will reconstruct a prepared transaction older than oldest timestamp during startup recovery.
- replTest.start(primary, {}, true);
-
- jsTestLog("Node was restarted");
- primary = replTest.getPrimary();
- testColl = primary.getDB(dbName)[collName];
-
- // Make sure we cannot see the writes from the prepared transaction yet.
- arrayEq(testColl.find().toArray(), [{_id: 0}, {_id: 2}]);
- assert.eq(testColl.count(), 3);
-
- // Make sure there is still one transaction in the transactions table. This is because the
- // entry in the transactions table is made durable when a transaction is prepared.
- assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
-
- // Make sure we can successfully commit the recovered prepared transaction.
- session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
- sessionDB = session.getDatabase(dbName);
- // The transaction on this session should have a txnNumber of 0. We explicitly set this
- // since createSessionWithGivenId does not restore the current txnNumber in the shell.
- session.setTxnNumber_forTesting(0);
- const txnNumber = session.getTxnNumber_forTesting();
-
- // Make sure we cannot add any operations to a prepared transaction.
- assert.commandFailedWithCode(sessionDB.runCommand({
- insert: collName,
- txnNumber: NumberLong(txnNumber),
- documents: [{_id: 10}],
- autocommit: false,
- }),
- ErrorCodes.PreparedTransactionInProgress);
-
- // Make sure that writing to a document that was updated in the prepared transaction causes
- // a write conflict.
- assert.commandFailedWithCode(
- sessionDB.runCommand(
- {update: collName, updates: [{q: {_id: 0}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
- ErrorCodes.MaxTimeMSExpired);
-
- jsTestLog("Committing the prepared transaction");
- assert.commandWorked(sessionDB.adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTimestamp,
- txnNumber: NumberLong(txnNumber),
- autocommit: false,
- }));
-
- // Make sure we can see the effects of the prepared transaction.
- arrayEq(testColl.find().toArray(), [{_id: 0, a: largeArray}, {_id: 1}, {_id: 2}]);
- assert.eq(testColl.count(), 3);
-
- replTest.stopSet();
+"use strict";
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/aggregation/extras/utils.js");
+
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
+
+let primary = replTest.getPrimary();
+
+const dbName = "test";
+const collName = "startup_recovery_reconstructs_txn_prepared_before_stable";
+const testDB = primary.getDB(dbName);
+let testColl = testDB.getCollection(collName);
+
+assert.commandWorked(testColl.insert({_id: 0}));
+
+// Start a session on the primary.
+let session = primary.startSession();
+const sessionID = session.getSessionId();
+let sessionDB = session.getDatabase(dbName);
+let sessionColl = sessionDB.getCollection(collName);
+
+// Prepare the transaction on the session.
+session.startTransaction();
+// We create a 14MB record for _id 0 so that, when this test runs with a smaller
+// WiredTiger cache size, the record is more likely to be considered for eviction
+// from the in-memory tree. This confirms that we don't hit problems like
+// SERVER-40422.
+const largeArray = new Array(14 * 1024 * 1024).join('x');
+assert.commandWorked(sessionColl.update({_id: 0}, {$set: {a: largeArray}}));
+assert.commandWorked(sessionColl.insert({_id: 1}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+// Fastcount reflects the insert of a prepared transaction.
+assert.eq(testColl.count(), 2);
+
+jsTestLog("Do a majority write to advance the stable timestamp past the prepareTimestamp");
+// Doing a majority write after preparing the transaction ensures that the stable timestamp is
+// past the prepare timestamp because this write must be in the committed snapshot.
+assert.commandWorked(
+ testColl.runCommand("insert", {documents: [{_id: 2}]}, {writeConcern: {w: "majority"}}));
+
+// Fastcount reflects the insert of a prepared transaction.
+assert.eq(testColl.count(), 3);
+
+// Check that we have one transaction in the transactions table.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
+
+jsTestLog("Restarting node");
+// Perform a clean shutdown and restart. The data restored at the storage recovery
+// timestamp should not reflect the prepared transaction.
+replTest.stop(primary, undefined, {skipValidation: true});
+// Since the oldest timestamp equals the stable timestamp when the node restarts, this test
+// reconstructs a prepared transaction that is older than the oldest timestamp during startup
+// recovery.
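+// Passing restart=true to ReplSetTest.start() reuses the node's existing data files
+// instead of wiping them, so startup recovery actually runs.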
+replTest.start(primary, {}, true);
+
+jsTestLog("Node was restarted");
+primary = replTest.getPrimary();
+testColl = primary.getDB(dbName)[collName];
+
+// Make sure we cannot see the writes from the prepared transaction yet.
+assert(arrayEq(testColl.find().toArray(), [{_id: 0}, {_id: 2}]));
+assert.eq(testColl.count(), 3);
+
+// Make sure there is still one transaction in the transactions table. This is because the
+// entry in the transactions table is made durable when a transaction is prepared.
+assert.eq(primary.getDB('config')['transactions'].find().itcount(), 1);
+
+// Make sure we can successfully commit the recovered prepared transaction.
+session = PrepareHelpers.createSessionWithGivenId(primary, sessionID);
+sessionDB = session.getDatabase(dbName);
+// The transaction on this session should have a txnNumber of 0. We explicitly set this
+// since createSessionWithGivenId does not restore the current txnNumber in the shell.
+session.setTxnNumber_forTesting(0);
+const txnNumber = session.getTxnNumber_forTesting();
+
+// Make sure we cannot add any operations to a prepared transaction.
+assert.commandFailedWithCode(sessionDB.runCommand({
+ insert: collName,
+ txnNumber: NumberLong(txnNumber),
+ documents: [{_id: 10}],
+ autocommit: false,
+}),
+ ErrorCodes.PreparedTransactionInProgress);
+
+// Make sure that writing to a document that was updated in the prepared transaction causes
+// a write conflict.
+assert.commandFailedWithCode(
+ sessionDB.runCommand(
+ {update: collName, updates: [{q: {_id: 0}, u: {$set: {a: 2}}}], maxTimeMS: 5 * 1000}),
+ ErrorCodes.MaxTimeMSExpired);
+
+jsTestLog("Committing the prepared transaction");
+assert.commandWorked(sessionDB.adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTimestamp,
+ txnNumber: NumberLong(txnNumber),
+ autocommit: false,
+}));
+
+// Make sure we can see the effects of the prepared transaction.
+assert(arrayEq(testColl.find().toArray(), [{_id: 0, a: largeArray}, {_id: 1}, {_id: 2}]));
+assert.eq(testColl.count(), 3);
+
+replTest.stopSet();
}());
diff --git a/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js b/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js
index 0dca4e5efd6..3b83bda0888 100644
--- a/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js
+++ b/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js
@@ -4,40 +4,40 @@
*/
(function() {
- load("jstests/libs/check_log.js");
- load("jstests/libs/feature_compatibility_version.js");
-
- rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
-
- jsTestLog("Adding a second node to the replica set.");
-
- const adminDbName = "admin";
- const versionCollName = "system.version";
- const nss = adminDbName + "." + versionCollName;
-
- // Hang initial sync before cloning the FCV document.
- let secondary = rst.add({rsConfig: {priority: 0}});
- assert.commandWorked(secondary.getDB('admin').runCommand({
- configureFailPoint: 'initialSyncHangBeforeCollectionClone',
- mode: 'alwaysOn',
- data: {namespace: nss}
- }));
- rst.reInitiate();
- checkLog.contains(secondary, "initialSyncHangBeforeCollectionClone fail point enabled.");
-
- jsTestLog("Restarting secondary in the early stages of initial sync.");
- rst.restart(secondary);
-
- rst.awaitSecondaryNodes();
-
- // Get the new secondary connection.
- secondary = rst.getSecondary();
- secondary.setSlaveOk(true);
-
- const secondaryAdminDb = secondary.getDB("admin");
- // Assert that the FCV document was cloned through initial sync on the secondary.
- checkFCV(secondaryAdminDb, latestFCV);
- rst.stopSet();
+load("jstests/libs/check_log.js");
+load("jstests/libs/feature_compatibility_version.js");
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+
+jsTestLog("Adding a second node to the replica set.");
+
+const adminDbName = "admin";
+const versionCollName = "system.version";
+const nss = adminDbName + "." + versionCollName;
+
+// Hang initial sync before cloning the FCV document.
+let secondary = rst.add({rsConfig: {priority: 0}});
+assert.commandWorked(secondary.getDB('admin').runCommand({
+ configureFailPoint: 'initialSyncHangBeforeCollectionClone',
+ mode: 'alwaysOn',
+ data: {namespace: nss}
+}));
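+// The data.namespace argument above limits the hang to the clone of admin.system.version.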
+rst.reInitiate();
+checkLog.contains(secondary, "initialSyncHangBeforeCollectionClone fail point enabled.");
+
+jsTestLog("Restarting secondary in the early stages of initial sync.");
+rst.restart(secondary);
+
+rst.awaitSecondaryNodes();
+
+// Get the new secondary connection.
+secondary = rst.getSecondary();
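+// Allow reads on the secondary connection so we can inspect the cloned FCV document.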
+secondary.setSlaveOk(true);
+
+const secondaryAdminDb = secondary.getDB("admin");
+// Assert that the FCV document was cloned through initial sync on the secondary.
+checkFCV(secondaryAdminDb, latestFCV);
+rst.stopSet();
}());
diff --git a/jstests/replsets/step_down_during_draining.js b/jstests/replsets/step_down_during_draining.js
index bedf5facb8e..47c8ee2651a 100644
--- a/jstests/replsets/step_down_during_draining.js
+++ b/jstests/replsets/step_down_during_draining.js
@@ -12,122 +12,121 @@
load("jstests/replsets/rslib.js");
(function() {
- "use strict";
- var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- var conf = replSet.getReplSetConfig();
- conf.members[2].priority = 0;
- conf.settings = conf.settings || {};
- conf.settings.chainingAllowed = false;
- conf.settings.catchUpTimeoutMillis = 0;
- replSet.initiate(conf);
-
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
-
- // Set verbosity for replication on all nodes.
- var verbosity = {
- "setParameter": 1,
- "logComponentVerbosity": {
- "replication": {"verbosity": 3},
- }
- };
- replSet.nodes.forEach(function(node) {
- node.adminCommand(verbosity);
- });
-
- function enableFailPoint(node) {
- jsTest.log("enable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+"use strict";
+var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+var conf = replSet.getReplSetConfig();
+conf.members[2].priority = 0;
+conf.settings = conf.settings || {};
+conf.settings.chainingAllowed = false;
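+// A catchUpTimeoutMillis of 0 disables primary catchup, so a newly elected node
+// proceeds directly to drain mode.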
+conf.settings.catchUpTimeoutMillis = 0;
+replSet.initiate(conf);
+
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
+
+// Set verbosity for replication on all nodes.
+var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": {
+ "replication": {"verbosity": 3},
}
+};
+replSet.nodes.forEach(function(node) {
+ node.adminCommand(verbosity);
+});
- function disableFailPoint(node) {
- jsTest.log("disable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- }
-
- // Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
- // that waits for a node to leave drain mode.
- function stepUpNode(node) {
- assert.soonNoExcept(function() {
- assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
- // We do not specify a specific primary so that if a different primary gets elected
- // due to unfortunate timing we can try again.
- replSet.awaitNodesAgreeOnPrimary();
- return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
- }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
- }
-
- // Do an initial insert to prevent the secondary from going into recovery
- var numDocuments = 20;
- var coll = primary.getDB("foo").foo;
- assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
- replSet.awaitReplication();
-
- // Enable fail point to stop replication.
- var secondaries = replSet.getSecondaries();
- secondaries.forEach(enableFailPoint);
-
- var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
- for (var i = 1; i < numDocuments; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
- jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
- assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
-
- assert.soon(
- function() {
- var serverStatus = secondary.getDB('foo').serverStatus();
- var bufferCount = serverStatus.metrics.repl.buffer.count;
- var bufferCountChange = bufferCount - bufferCountBefore;
- jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
- bufferCountChange);
- return bufferCountChange == numDocuments - 1;
- },
- 'secondary did not buffer operations for new inserts on primary',
- replSet.kDefaultTimeoutMs,
- 1000);
-
- reconnect(secondary);
- stepUpNode(secondary);
-
- // Secondary doesn't allow writes yet.
- var res = secondary.getDB("admin").runCommand({"isMaster": 1});
- assert(!res.ismaster);
-
- assert.commandFailedWithCode(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 5000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
-
- // Original primary steps up.
- reconnect(primary);
- stepUpNode(primary);
-
- reconnect(secondary);
- stepUpNode(secondary);
-
- // Disable fail point to allow replication.
- secondaries.forEach(disableFailPoint);
-
+function enableFailPoint(node) {
+ jsTest.log("enable failpoint " + node.host);
assert.commandWorked(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: replSet.kDefaultTimeoutMS,
- }),
- 'replSetTest waitForDrainFinish should work when draining is allowed to complete');
-
- // Ensure new primary is writable.
- jsTestLog('New primary should be writable after draining is complete');
- assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}));
- // Check that all writes reached the secondary's op queue prior to
- // stepping down the original primary and got applied.
- assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments);
- replSet.stopSet();
+ node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+}
+
+function disableFailPoint(node) {
+ jsTest.log("disable failpoint " + node.host);
+ assert.commandWorked(node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+}
+
+// Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
+// that waits for a node to leave drain mode.
+function stepUpNode(node) {
+ assert.soonNoExcept(function() {
+ assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
+ // We do not specify a specific primary so that if a different primary gets elected
+ // due to unfortunate timing we can try again.
+ replSet.awaitNodesAgreeOnPrimary();
+ return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
+ }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
+}
+
+// Do an initial insert to prevent the secondary from going into recovery
+var numDocuments = 20;
+var coll = primary.getDB("foo").foo;
+assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
+replSet.awaitReplication();
+
+// Enable fail point to stop replication.
+var secondaries = replSet.getSecondaries();
+secondaries.forEach(enableFailPoint);
+
+var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
+for (var i = 1; i < numDocuments; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
+jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
+assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
+
+assert.soon(
+ function() {
+ var serverStatus = secondary.getDB('foo').serverStatus();
+ var bufferCount = serverStatus.metrics.repl.buffer.count;
+ var bufferCountChange = bufferCount - bufferCountBefore;
+ jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
+ bufferCountChange);
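+        // Expect numDocuments - 1 new ops: the initial {x: 0} insert was applied
+        // before the failpoint was enabled.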
+ return bufferCountChange == numDocuments - 1;
+ },
+ 'secondary did not buffer operations for new inserts on primary',
+    replSet.kDefaultTimeoutMS,
+ 1000);
+
+reconnect(secondary);
+stepUpNode(secondary);
+
+// Secondary doesn't allow writes yet.
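+// It has won the election but remains in drain mode, reporting ismaster: false
+// until draining completes.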
+var res = secondary.getDB("admin").runCommand({"isMaster": 1});
+assert(!res.ismaster);
+
+assert.commandFailedWithCode(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 5000,
+ }),
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
+
+// Original primary steps up.
+reconnect(primary);
+stepUpNode(primary);
+
+reconnect(secondary);
+stepUpNode(secondary);
+
+// Disable fail point to allow replication.
+secondaries.forEach(disableFailPoint);
+
+assert.commandWorked(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: replSet.kDefaultTimeoutMS,
+ }),
+ 'replSetTest waitForDrainFinish should work when draining is allowed to complete');
+
+// Ensure new primary is writable.
+jsTestLog('New primary should be writable after draining is complete');
+assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}));
+// Check that all writes reached the secondary's op queue prior to
+// stepping down the original primary and got applied.
+assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments);
+replSet.stopSet();
})();
diff --git a/jstests/replsets/step_down_during_draining2.js b/jstests/replsets/step_down_during_draining2.js
index 1687d39d7c4..1e97f93865a 100644
--- a/jstests/replsets/step_down_during_draining2.js
+++ b/jstests/replsets/step_down_during_draining2.js
@@ -11,163 +11,162 @@
// 7. Allow Node 1 to finish stepping down.
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js");
- load("jstests/libs/check_log.js");
-
- var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- var conf = replSet.getReplSetConfig();
- conf.members[2].priority = 0;
- conf.settings = conf.settings || {};
- conf.settings.chainingAllowed = false;
- conf.settings.catchUpTimeoutMillis = 0;
- replSet.initiate(conf);
-
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
-
- // Set verbosity for replication on all nodes.
- var verbosity = {
- "setParameter": 1,
- "logComponentVerbosity": {
- "replication": {"verbosity": 3},
- }
- };
- replSet.nodes.forEach(function(node) {
- node.adminCommand(verbosity);
- });
-
- function enableFailPoint(node) {
- jsTest.log("enable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+"use strict";
+
+load("jstests/replsets/rslib.js");
+load("jstests/libs/check_log.js");
+
+var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+var conf = replSet.getReplSetConfig();
+conf.members[2].priority = 0;
+conf.settings = conf.settings || {};
+conf.settings.chainingAllowed = false;
+conf.settings.catchUpTimeoutMillis = 0;
+replSet.initiate(conf);
+
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
+
+// Set verbosity for replication on all nodes.
+var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": {
+ "replication": {"verbosity": 3},
}
+};
+replSet.nodes.forEach(function(node) {
+ node.adminCommand(verbosity);
+});
- function disableFailPoint(node) {
- jsTest.log("disable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- }
-
- // Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
- // that waits for a node to leave drain mode.
- function stepUpNode(node) {
- jsTest.log("Stepping up: " + node.host);
- assert.soonNoExcept(function() {
- assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
- // We do not specify a specific primary so that if a different primary gets elected
- // due to unfortunate timing we can try again.
- replSet.awaitNodesAgreeOnPrimary();
- return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
- }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
- }
-
- // Do an initial insert to prevent the secondary from going into recovery
- var numDocuments = 20;
- var coll = primary.getDB("foo").foo;
- assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
- replSet.awaitReplication();
-
- // Enable fail point to stop replication.
- var secondaries = replSet.getSecondaries();
- secondaries.forEach(enableFailPoint);
-
- var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
- for (var i = 1; i < numDocuments; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
- jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
- assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
-
- assert.soon(
- function() {
- var serverStatus = secondary.getDB('foo').serverStatus();
- var bufferCount = serverStatus.metrics.repl.buffer.count;
- var bufferCountChange = bufferCount - bufferCountBefore;
- jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
- bufferCountChange);
- return bufferCountChange == numDocuments - 1;
- },
- 'secondary did not buffer operations for new inserts on primary',
- replSet.kDefaultTimeoutMs,
- 1000);
-
- reconnect(secondary);
- stepUpNode(secondary);
-
- // Secondary doesn't allow writes yet.
- var res = secondary.getDB("admin").runCommand({"isMaster": 1});
- assert(!res.ismaster);
-
- assert.commandFailedWithCode(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 5000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
-
- // Prevent the current primary from stepping down
- jsTest.log("disallowing heartbeat stepdown " + secondary.host);
- assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "blockHeartbeatStepdown", mode: 'alwaysOn'}));
- jsTestLog("Shut down the rest of the set so the primary-elect has to step down");
- replSet.stop(primary);
- disableFailPoint(replSet.nodes[2]); // Fail point needs to be off when node is shut down.
- replSet.stop(2);
-
- jsTestLog("Waiting for secondary to begin stepping down while in drain mode");
- checkLog.contains(secondary, "stepDown - blockHeartbeatStepdown fail point enabled");
-
- // Disable fail point to allow replication and allow secondary to finish drain mode while in the
- // process of stepping down.
- jsTestLog("Re-enabling replication on secondary");
- assert.gt(numDocuments, secondary.getDB("foo").foo.find().itcount());
- disableFailPoint(secondary);
-
- // The node should now be able to apply the writes in its buffer.
- jsTestLog("Waiting for node to drain its apply buffer");
- assert.soon(function() {
- return secondary.getDB("foo").foo.find().itcount() == numDocuments;
- });
-
- // Even though it finished draining its buffer, it shouldn't be able to exit drain mode due to
- // pending stepdown.
- assert.commandFailedWithCode(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 5000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when in the middle of stepping down');
-
- jsTestLog("Checking that node is PRIMARY but not master");
- assert.eq(ReplSetTest.State.PRIMARY, secondary.adminCommand({replSetGetStatus: 1}).myState);
- assert(!secondary.adminCommand('ismaster').ismaster);
-
- jsTest.log("allowing heartbeat stepdown " + secondary.host);
+function enableFailPoint(node) {
+ jsTest.log("enable failpoint " + node.host);
assert.commandWorked(
- secondary.adminCommand({configureFailPoint: "blockHeartbeatStepdown", mode: 'off'}));
-
- jsTestLog("Checking that node successfully stepped down");
- replSet.waitForState(secondary, ReplSetTest.State.SECONDARY);
- assert(!secondary.adminCommand('ismaster').ismaster);
-
- // Now ensure that the node can successfully become primary again.
- replSet.restart(0);
- replSet.restart(2);
- stepUpNode(secondary);
-
- assert.soon(function() {
- return secondary.adminCommand('ismaster').ismaster;
- });
-
- jsTestLog('Ensure new primary is writable.');
- assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}, {writeConcern: {w: 3}}));
- // Check that no writes were lost.
- assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments);
- replSet.stopSet();
+ node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+}
+
+function disableFailPoint(node) {
+ jsTest.log("disable failpoint " + node.host);
+ assert.commandWorked(node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+}
+
+// Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
+// that waits for a node to leave drain mode.
+function stepUpNode(node) {
+ jsTest.log("Stepping up: " + node.host);
+ assert.soonNoExcept(function() {
+ assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
+ // We do not specify a specific primary so that if a different primary gets elected
+ // due to unfortunate timing we can try again.
+ replSet.awaitNodesAgreeOnPrimary();
+ return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
+ }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
+}
+
+// Do an initial insert to prevent the secondary from going into recovery
+var numDocuments = 20;
+var coll = primary.getDB("foo").foo;
+assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
+replSet.awaitReplication();
+
+// Enable fail point to stop replication.
+var secondaries = replSet.getSecondaries();
+secondaries.forEach(enableFailPoint);
+
+var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
+for (var i = 1; i < numDocuments; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
+jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
+assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
+
+assert.soon(
+ function() {
+ var serverStatus = secondary.getDB('foo').serverStatus();
+ var bufferCount = serverStatus.metrics.repl.buffer.count;
+ var bufferCountChange = bufferCount - bufferCountBefore;
+ jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
+ bufferCountChange);
+ return bufferCountChange == numDocuments - 1;
+ },
+ 'secondary did not buffer operations for new inserts on primary',
+    replSet.kDefaultTimeoutMS,
+ 1000);
+
+reconnect(secondary);
+stepUpNode(secondary);
+
+// Secondary doesn't allow writes yet.
+var res = secondary.getDB("admin").runCommand({"isMaster": 1});
+assert(!res.ismaster);
+
+assert.commandFailedWithCode(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 5000,
+ }),
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
+
+// Prevent the current primary from stepping down
+jsTest.log("disallowing heartbeat stepdown " + secondary.host);
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "blockHeartbeatStepdown", mode: 'alwaysOn'}));
+jsTestLog("Shut down the rest of the set so the primary-elect has to step down");
+replSet.stop(primary);
+disableFailPoint(replSet.nodes[2]); // Fail point needs to be off when node is shut down.
+replSet.stop(2);
+
+jsTestLog("Waiting for secondary to begin stepping down while in drain mode");
+checkLog.contains(secondary, "stepDown - blockHeartbeatStepdown fail point enabled");
+
+// Disable fail point to allow replication and allow secondary to finish drain mode while in the
+// process of stepping down.
+jsTestLog("Re-enabling replication on secondary");
+assert.gt(numDocuments, secondary.getDB("foo").foo.find().itcount());
+disableFailPoint(secondary);
+
+// The node should now be able to apply the writes in its buffer.
+jsTestLog("Waiting for node to drain its apply buffer");
+assert.soon(function() {
+ return secondary.getDB("foo").foo.find().itcount() == numDocuments;
+});
+
+// Even though it finished draining its buffer, it shouldn't be able to exit drain mode due to
+// pending stepdown.
+assert.commandFailedWithCode(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 5000,
+ }),
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForDrainFinish should time out when in the middle of stepping down');
+
+jsTestLog("Checking that node is PRIMARY but not master");
+assert.eq(ReplSetTest.State.PRIMARY, secondary.adminCommand({replSetGetStatus: 1}).myState);
+assert(!secondary.adminCommand('ismaster').ismaster);
+
+jsTest.log("allowing heartbeat stepdown " + secondary.host);
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: "blockHeartbeatStepdown", mode: 'off'}));
+
+jsTestLog("Checking that node successfully stepped down");
+replSet.waitForState(secondary, ReplSetTest.State.SECONDARY);
+assert(!secondary.adminCommand('ismaster').ismaster);
+
+// Now ensure that the node can successfully become primary again.
+replSet.restart(0);
+replSet.restart(2);
+stepUpNode(secondary);
+
+assert.soon(function() {
+ return secondary.adminCommand('ismaster').ismaster;
+});
+
+jsTestLog('Ensure new primary is writable.');
+assert.writeOK(secondary.getDB("foo").flag.insert({sentinel: 1}, {writeConcern: {w: 3}}));
+// Check that no writes were lost.
+assert.eq(secondary.getDB("foo").foo.find().itcount(), numDocuments);
+replSet.stopSet();
})();
diff --git a/jstests/replsets/step_down_during_draining3.js b/jstests/replsets/step_down_during_draining3.js
index c8631bd12f1..98c42955fc6 100644
--- a/jstests/replsets/step_down_during_draining3.js
+++ b/jstests/replsets/step_down_during_draining3.js
@@ -1,123 +1,122 @@
// Test that the stepdown command can be run successfully during drain mode
(function() {
- "use strict";
-
- load("jstests/replsets/rslib.js");
-
- var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- var conf = replSet.getReplSetConfig();
- conf.members[2].priority = 0;
- conf.settings = conf.settings || {};
- conf.settings.chainingAllowed = false;
- conf.settings.catchUpTimeoutMillis = 0;
- replSet.initiate(conf);
-
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
-
- // Set verbosity for replication on all nodes.
- var verbosity = {
- "setParameter": 1,
- "logComponentVerbosity": {
- "replication": {"verbosity": 3},
- }
- };
- replSet.nodes.forEach(function(node) {
- node.adminCommand(verbosity);
- });
-
- function enableFailPoint(node) {
- jsTest.log("enable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+"use strict";
+
+load("jstests/replsets/rslib.js");
+
+var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+var conf = replSet.getReplSetConfig();
+conf.members[2].priority = 0;
+conf.settings = conf.settings || {};
+conf.settings.chainingAllowed = false;
+conf.settings.catchUpTimeoutMillis = 0;
+replSet.initiate(conf);
+
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
+
+// Set verbosity for replication on all nodes.
+var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": {
+ "replication": {"verbosity": 3},
}
+};
+replSet.nodes.forEach(function(node) {
+ node.adminCommand(verbosity);
+});
- function disableFailPoint(node) {
- jsTest.log("disable failpoint " + node.host);
- assert.commandWorked(
- node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- }
-
- // Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
- // that waits for a node to leave drain mode.
- function stepUpNode(node) {
- jsTest.log("Stepping up: " + node.host);
- assert.soonNoExcept(function() {
- assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
- // We do not specify a specific primary so that if a different primary gets elected
- // due to unfortunate timing we can try again.
- replSet.awaitNodesAgreeOnPrimary();
- return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
- }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
- }
-
- // Do an initial insert to prevent the secondary from going into recovery
- var numDocuments = 20;
- var coll = primary.getDB("foo").foo;
- assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
- replSet.awaitReplication();
-
- // Enable fail point to stop replication.
- var secondaries = replSet.getSecondaries();
- secondaries.forEach(enableFailPoint);
-
- var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
- for (var i = 1; i < numDocuments; ++i) {
- assert.writeOK(coll.insert({x: i}));
- }
- jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
- assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
-
- assert.soon(
- function() {
- var serverStatus = secondary.getDB('foo').serverStatus();
- var bufferCount = serverStatus.metrics.repl.buffer.count;
- var bufferCountChange = bufferCount - bufferCountBefore;
- jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
- bufferCountChange);
- return bufferCountChange == numDocuments - 1;
- },
- 'secondary did not buffer operations for new inserts on primary',
- replSet.kDefaultTimeoutMs,
- 1000);
-
- reconnect(secondary);
- stepUpNode(secondary);
-
- // Secondary doesn't allow writes yet.
- var res = secondary.getDB("admin").runCommand({"isMaster": 1});
- assert(!res.ismaster);
-
- assert.commandFailedWithCode(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 5000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
-
- assert.commandWorked(secondary.adminCommand({replSetStepDown: 60, force: true}));
-
- // Assert stepdown was successful.
- assert.eq(ReplSetTest.State.SECONDARY, secondary.adminCommand({replSetGetStatus: 1}).myState);
- assert(!secondary.adminCommand('ismaster').ismaster);
-
- // Prevent the producer from fetching new ops
+function enableFailPoint(node) {
+ jsTest.log("enable failpoint " + node.host);
assert.commandWorked(
- secondary.adminCommand({configureFailPoint: 'stopReplProducer', mode: 'alwaysOn'}));
-
- // Allow the secondary to apply the ops already in its buffer.
- jsTestLog("Re-enabling replication on secondaries");
- assert.gt(numDocuments, secondary.getDB("foo").foo.find().itcount());
- secondaries.forEach(disableFailPoint);
-
- // The node should now be able to apply the writes in its buffer.
- jsTestLog("Waiting for node to drain its apply buffer");
- assert.soon(function() {
- return secondary.getDB("foo").foo.find().itcount() == numDocuments;
- });
- replSet.stopSet();
+ node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+}
+
+function disableFailPoint(node) {
+ jsTest.log("disable failpoint " + node.host);
+ assert.commandWorked(node.adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+}
+
+// Since this test blocks a node in drain mode, we cannot use the ReplSetTest stepUp helper
+// that waits for a node to leave drain mode.
+function stepUpNode(node) {
+ jsTest.log("Stepping up: " + node.host);
+ assert.soonNoExcept(function() {
+ assert.commandWorked(node.adminCommand({replSetStepUp: 1}));
+ // We do not specify a specific primary so that if a different primary gets elected
+ // due to unfortunate timing we can try again.
+ replSet.awaitNodesAgreeOnPrimary();
+ return node.adminCommand('replSetGetStatus').myState === ReplSetTest.State.PRIMARY;
+ }, 'failed to step up node ' + node.host, replSet.kDefaultTimeoutMS);
+}
+
+// Do an initial insert to prevent the secondary from going into recovery
+var numDocuments = 20;
+var coll = primary.getDB("foo").foo;
+assert.writeOK(coll.insert({x: 0}, {writeConcern: {w: 3}}));
+replSet.awaitReplication();
+
+// Enable fail point to stop replication.
+var secondaries = replSet.getSecondaries();
+secondaries.forEach(enableFailPoint);
+
+var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
+for (var i = 1; i < numDocuments; ++i) {
+ assert.writeOK(coll.insert({x: i}));
+}
+jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
+assert.eq(numDocuments, primary.getDB("foo").foo.find().itcount());
+
+assert.soon(
+ function() {
+ var serverStatus = secondary.getDB('foo').serverStatus();
+ var bufferCount = serverStatus.metrics.repl.buffer.count;
+ var bufferCountChange = bufferCount - bufferCountBefore;
+ jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
+ bufferCountChange);
+ return bufferCountChange == numDocuments - 1;
+ },
+ 'secondary did not buffer operations for new inserts on primary',
+    replSet.kDefaultTimeoutMS,
+ 1000);
+
+reconnect(secondary);
+stepUpNode(secondary);
+
+// Secondary doesn't allow writes yet.
+var res = secondary.getDB("admin").runCommand({"isMaster": 1});
+assert(!res.ismaster);
+
+assert.commandFailedWithCode(
+ secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 5000,
+ }),
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
+
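+// The stepdown command should succeed even though the node is still in drain mode.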
+assert.commandWorked(secondary.adminCommand({replSetStepDown: 60, force: true}));
+
+// Assert stepdown was successful.
+assert.eq(ReplSetTest.State.SECONDARY, secondary.adminCommand({replSetGetStatus: 1}).myState);
+assert(!secondary.adminCommand('ismaster').ismaster);
+
+// Prevent the producer from fetching new ops
+assert.commandWorked(
+ secondary.adminCommand({configureFailPoint: 'stopReplProducer', mode: 'alwaysOn'}));
+
+// Allow the secondary to apply the ops already in its buffer.
+jsTestLog("Re-enabling replication on secondaries");
+assert.gt(numDocuments, secondary.getDB("foo").foo.find().itcount());
+secondaries.forEach(disableFailPoint);
+
+// The node should now be able to apply the writes in its buffer.
+jsTestLog("Waiting for node to drain its apply buffer");
+assert.soon(function() {
+ return secondary.getDB("foo").foo.find().itcount() == numDocuments;
+});
+replSet.stopSet();
})();
diff --git a/jstests/replsets/step_down_on_secondary.js b/jstests/replsets/step_down_on_secondary.js
index 2a5c279422b..0420374a577 100644
--- a/jstests/replsets/step_down_on_secondary.js
+++ b/jstests/replsets/step_down_on_secondary.js
@@ -13,123 +13,123 @@
*/
(function() {
- "use strict";
- load('jstests/libs/parallelTester.js');
- load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
- load("jstests/core/txns/libs/prepare_helpers.js");
- load("jstests/libs/check_log.js");
-
- const dbName = "test";
- const collName = "coll";
-
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB(dbName);
- const primaryColl = primaryDB[collName];
- const collNss = primaryColl.getFullName();
- const secondary = rst.getSecondary();
-
- TestData.dbName = dbName;
- TestData.collName = collName;
- TestData.collNss = collNss;
-
- jsTestLog("Do a document write");
- assert.commandWorked(primaryColl.insert({_id: 0}, {"writeConcern": {"w": "majority"}}));
- rst.awaitReplication();
-
- jsTestLog("Hang primary on step down");
- const joinStepDownThread = startParallelShell(() => {
- assert.commandWorked(db.adminCommand(
- {configureFailPoint: "stepdownHangBeforeRSTLEnqueue", mode: "alwaysOn"}));
-
- const freezeSecs = 24 * 60 * 60; // 24 hours
- assert.commandFailedWithCode(
- db.adminCommand({"replSetStepDown": freezeSecs, "force": true}), ErrorCodes.NotMaster);
- }, primary.port);
-
- waitForCurOpByFailPointNoNS(primaryDB, "stepdownHangBeforeRSTLEnqueue");
-
- jsTestLog("Force reconfig to swap the electable node");
- const newConfig = rst.getReplSetConfigFromNode();
- const oldPrimaryId = rst.getNodeId(primary);
- const newPrimaryId = rst.getNodeId(secondary);
- newConfig.members[newPrimaryId].priority = 1;
- newConfig.members[oldPrimaryId].priority = 0;
- newConfig.version++;
- assert.commandWorked(secondary.adminCommand({"replSetReconfig": newConfig, force: true}));
-
- jsTestLog("Step up the new electable node");
- rst.stepUp(secondary);
-
- jsTestLog("Wait for step up to complete");
- // Wait until the primary successfully steps down via heartbeat reconfig.
- rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
- const newPrimary = rst.getPrimary();
-
- jsTestLog("Prepare a transaction on the new primary");
- const session = newPrimary.startSession();
- const sessionDb = session.getDatabase(dbName);
- const sessionColl = sessionDb[collName];
- session.startTransaction({writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {"b": 1}}));
- const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
-
- jsTestLog("Get a cluster time for afterClusterTime reads");
- TestData.clusterTimeAfterPrepare =
- assert
- .commandWorked(newPrimary.getDB(dbName)[collName].runCommand(
- "insert", {documents: [{_id: "clusterTimeAfterPrepare"}]}))
- .operationTime;
-
- // Make sure the insert gets replicated to the old primary (current secondary) so that its
- // clusterTime advances before we try to do an afterClusterTime read at the time of the insert.
- rst.awaitReplication();
-
- jsTestLog("Do a read that hits a prepare conflict on the old primary");
+"use strict";
+load('jstests/libs/parallelTester.js');
+load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
+load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/check_log.js");
+
+const dbName = "test";
+const collName = "coll";
+
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+const primaryColl = primaryDB[collName];
+const collNss = primaryColl.getFullName();
+const secondary = rst.getSecondary();
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+TestData.collNss = collNss;
+
+jsTestLog("Do a document write");
+assert.commandWorked(primaryColl.insert({_id: 0}, {"writeConcern": {"w": "majority"}}));
+rst.awaitReplication();
+
+jsTestLog("Hang primary on step down");
+const joinStepDownThread = startParallelShell(() => {
assert.commandWorked(
- primary.adminCommand({configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
-
- const joinReadThread = startParallelShell(() => {
- db.getMongo().setSlaveOk(true);
- oldPrimaryDB = db.getSiblingDB(TestData.dbName);
-
- assert.commandFailedWithCode(oldPrimaryDB.runCommand({
- find: TestData.collName,
- filter: {_id: 0},
- readConcern: {level: "local", afterClusterTime: TestData.clusterTimeAfterPrepare},
- }),
- ErrorCodes.InterruptedDueToReplStateChange);
- }, primary.port);
-
- jsTestLog("Wait to hit a prepare conflict");
- checkLog.contains(primary, "WTPrintPrepareConflictLog fail point enabled");
-
- jsTestLog("Allow step down to complete");
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: "stepdownHangBeforeRSTLEnqueue", mode: "off"}));
-
- jsTestLog("Wait for step down to start killing operations");
- checkLog.contains(primary, "Starting to kill user operations");
-
- jsTestLog("Commit the prepared transaction");
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- jsTestLog("Join parallel shells");
- joinStepDownThread();
- joinReadThread();
-
- // Validate that the read operation got killed during step down.
- const replMetrics = assert.commandWorked(primary.adminCommand({serverStatus: 1})).metrics.repl;
- assert.eq(replMetrics.stepDown.userOperationsKilled, 1, replMetrics);
-
- jsTestLog("Check nodes have correct data");
- assert.docEq(newPrimary.getDB(dbName)[collName].find({_id: 0}).toArray(), [{_id: 0, b: 1}]);
- rst.awaitReplication();
- assert.docEq(primary.getDB(dbName)[collName].find({_id: 0}).toArray(), [{_id: 0, b: 1}]);
-
- rst.stopSet();
+ db.adminCommand({configureFailPoint: "stepdownHangBeforeRSTLEnqueue", mode: "alwaysOn"}));
+
+ const freezeSecs = 24 * 60 * 60; // 24 hours
+ assert.commandFailedWithCode(db.adminCommand({"replSetStepDown": freezeSecs, "force": true}),
+ ErrorCodes.NotMaster);
+}, primary.port);
+
+waitForCurOpByFailPointNoNS(primaryDB, "stepdownHangBeforeRSTLEnqueue");
+
+jsTestLog("Force reconfig to swap the electable node");
+const newConfig = rst.getReplSetConfigFromNode();
+const oldPrimaryId = rst.getNodeId(primary);
+const newPrimaryId = rst.getNodeId(secondary);
+newConfig.members[newPrimaryId].priority = 1;
+newConfig.members[oldPrimaryId].priority = 0;
+newConfig.version++;
+assert.commandWorked(secondary.adminCommand({"replSetReconfig": newConfig, force: true}));
+
+jsTestLog("Step up the new electable node");
+rst.stepUp(secondary);
+
+jsTestLog("Wait for step up to complete");
+// Wait until the primary successfully steps down via heartbeat reconfig.
+rst.waitForState(secondary, ReplSetTest.State.PRIMARY);
+rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+const newPrimary = rst.getPrimary();
+
+jsTestLog("Prepare a transaction on the new primary");
+const session = newPrimary.startSession();
+const sessionDb = session.getDatabase(dbName);
+const sessionColl = sessionDb[collName];
+session.startTransaction({writeConcern: {w: "majority"}});
+assert.commandWorked(sessionColl.update({_id: 0}, {$set: {"b": 1}}));
+const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+
+jsTestLog("Get a cluster time for afterClusterTime reads");
+TestData.clusterTimeAfterPrepare =
+ assert
+ .commandWorked(newPrimary.getDB(dbName)[collName].runCommand(
+ "insert", {documents: [{_id: "clusterTimeAfterPrepare"}]}))
+ .operationTime;
+
+// Make sure the insert gets replicated to the old primary (current secondary) so that its
+// clusterTime advances before we try to do an afterClusterTime read at the time of the insert.
+rst.awaitReplication();
+
+jsTestLog("Do a read that hits a prepare conflict on the old primary");
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "WTPrintPrepareConflictLog", mode: "alwaysOn"}));
+
+const joinReadThread = startParallelShell(() => {
+ db.getMongo().setSlaveOk(true);
+    const oldPrimaryDB = db.getSiblingDB(TestData.dbName);
+
+ assert.commandFailedWithCode(oldPrimaryDB.runCommand({
+ find: TestData.collName,
+ filter: {_id: 0},
+ readConcern: {level: "local", afterClusterTime: TestData.clusterTimeAfterPrepare},
+ }),
+ ErrorCodes.InterruptedDueToReplStateChange);
+}, primary.port);
+
+jsTestLog("Wait to hit a prepare conflict");
+checkLog.contains(primary, "WTPrintPrepareConflictLog fail point enabled");
+
+jsTestLog("Allow step down to complete");
+assert.commandWorked(
+ primary.adminCommand({configureFailPoint: "stepdownHangBeforeRSTLEnqueue", mode: "off"}));
+
+jsTestLog("Wait for step down to start killing operations");
+checkLog.contains(primary, "Starting to kill user operations");
+
+jsTestLog("Commit the prepared transaction");
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
+
+jsTestLog("Join parallel shells");
+joinStepDownThread();
+joinReadThread();
+
+// Validate that the read operation got killed during step down.
+const replMetrics = assert.commandWorked(primary.adminCommand({serverStatus: 1})).metrics.repl;
+assert.eq(replMetrics.stepDown.userOperationsKilled, 1, replMetrics);
+
+jsTestLog("Check nodes have correct data");
+assert.docEq(newPrimary.getDB(dbName)[collName].find({_id: 0}).toArray(), [{_id: 0, b: 1}]);
+rst.awaitReplication();
+assert.docEq(primary.getDB(dbName)[collName].find({_id: 0}).toArray(), [{_id: 0, b: 1}]);
+
+rst.stopSet();
})();
diff --git a/jstests/replsets/stepdown3.js b/jstests/replsets/stepdown3.js
index f40caabe242..508645cdf5a 100644
--- a/jstests/replsets/stepdown3.js
+++ b/jstests/replsets/stepdown3.js
@@ -3,49 +3,48 @@
// This test requires the fsync command to force a secondary to be stale.
// @tags: [requires_fsync]
(function() {
- 'use strict';
-
- var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
- var nodes = replTest.startSet();
- replTest.initiate();
- var master = replTest.getPrimary();
-
- // do a write to allow stepping down of the primary;
- // otherwise, the primary will refuse to step down
- print("\ndo a write");
- master.getDB("test").foo.insert({x: 1});
- replTest.awaitReplication();
-
- // do another write, because the first one might be longer than 10 seconds ago
- // on the secondary (due to starting up), and we need to be within 10 seconds
- // to step down.
- var options = {writeConcern: {w: 2, wtimeout: 30000}};
- assert.writeOK(master.getDB("test").foo.insert({x: 2}, options));
- // lock secondary, to pause replication
- print("\nlock secondary");
- var locked = replTest._slaves[0];
- printjson(locked.getDB("admin").runCommand({fsync: 1, lock: 1}));
-
- // do a write
- print("\ndo a write");
- master.getDB("test").foo.insert({x: 3});
-
- // step down the primary asyncronously
- print("stepdown");
- var command =
- "sleep(4000); assert.commandWorked(db.adminCommand( { replSetStepDown : 60, force : 1 } ));";
- var awaitShell = startParallelShell(command, master.port);
-
- print("getlasterror; should return an error");
- let result = master.getDB("test").runCommand({getLastError: 1, w: 2, wtimeout: 10 * 60 * 1000});
- assert(ErrorCodes.isNotMasterError(result.code));
- print("result of gle:");
- printjson(result);
-
- awaitShell();
-
- // unlock and shut down
- printjson(locked.getDB("admin").fsyncUnlock());
- replTest.stopSet();
-
+'use strict';
+
+var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+// do a write to allow stepping down of the primary;
+// otherwise, the primary will refuse to step down
+print("\ndo a write");
+master.getDB("test").foo.insert({x: 1});
+replTest.awaitReplication();
+
+// do another write, because the first one might have happened more than 10 seconds
+// ago on the secondary (due to startup time), and the last write must be within
+// 10 seconds for the primary to step down.
+var options = {writeConcern: {w: 2, wtimeout: 30000}};
+assert.writeOK(master.getDB("test").foo.insert({x: 2}, options));
+// lock secondary, to pause replication
+print("\nlock secondary");
+var locked = replTest._slaves[0];
+printjson(locked.getDB("admin").runCommand({fsync: 1, lock: 1}));
+
+// do a write
+print("\ndo a write");
+master.getDB("test").foo.insert({x: 3});
+
+// step down the primary asyncronously
+print("stepdown");
+var command =
+ "sleep(4000); assert.commandWorked(db.adminCommand( { replSetStepDown : 60, force : 1 } ));";
+var awaitShell = startParallelShell(command, master.port);
+
+print("getlasterror; should return an error");
+let result = master.getDB("test").runCommand({getLastError: 1, w: 2, wtimeout: 10 * 60 * 1000});
+assert(ErrorCodes.isNotMasterError(result.code));
+print("result of gle:");
+printjson(result);
+
+awaitShell();
+
+// unlock and shut down
+printjson(locked.getDB("admin").fsyncUnlock());
+replTest.stopSet();
})();
diff --git a/jstests/replsets/stepdown_catch_up_opt.js b/jstests/replsets/stepdown_catch_up_opt.js
index 4fd88748ae4..82c31b49a0a 100644
--- a/jstests/replsets/stepdown_catch_up_opt.js
+++ b/jstests/replsets/stepdown_catch_up_opt.js
@@ -4,85 +4,85 @@
*/
(function() {
- 'use strict';
- var name = 'stepdown_catch_up_opt';
- // Only 2 nodes, so that we can control whether the secondary is caught up.
- var replTest = new ReplSetTest({name: name, nodes: 2});
- replTest.startSet();
- replTest.initiate();
- replTest.awaitSecondaryNodes();
- var primary = replTest.getPrimary();
- var secondary = replTest.getSecondary();
+'use strict';
+var name = 'stepdown_catch_up_opt';
+// Only 2 nodes, so that we can control whether the secondary is caught up.
+var replTest = new ReplSetTest({name: name, nodes: 2});
+replTest.startSet();
+replTest.initiate();
+replTest.awaitSecondaryNodes();
+var primary = replTest.getPrimary();
+var secondary = replTest.getSecondary();
- // Error codes we expect to see.
+// Error codes we expect to see.
- // If the secondary is not caught up.
- const noCaughtUpSecondariesCode = ErrorCodes.ExceededTimeLimit;
+// If the secondary is not caught up.
+const noCaughtUpSecondariesCode = ErrorCodes.ExceededTimeLimit;
- // If the stepdown period is shorter than the secondaryCatchUpPeriodSecs argument.
- var stepDownPeriodTooShortCode = 2;
+// If the stepdown period is shorter than the secondaryCatchUpPeriodSecs argument.
+var stepDownPeriodTooShortCode = 2;  // ErrorCodes.BadValue
- // If we give a string as an argument instead of an integer.
- var stringNotIntCode = 14;
+// If we give a string as an argument instead of an integer.
+var stringNotIntCode = 14;  // ErrorCodes.TypeMismatch
- // Expect a failure with a string argument.
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 'STR'}),
- stringNotIntCode,
- 'Expected string argument to secondaryCatchupPeriodSecs to fail.');
-
- // Expect a failure with a longer secondaryCatchupPeriodSecs than the stepdown period.
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 20}),
- stepDownPeriodTooShortCode,
- ('Expected replSetStepDown to fail given a stepdown time shorter than' +
- ' secondaryCatchUpPeriodSecs'));
+// Expect a failure with a string argument.
+assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 'STR'}),
+ stringNotIntCode,
+    'Expected string argument to secondaryCatchUpPeriodSecs to fail.');
- jsTestLog('Stop secondary syncing.');
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
- 'Failed to configure rsSyncApplyStop failpoint.');
+// Expect a failure with a longer secondaryCatchupPeriodSecs than the stepdown period.
+assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 20}),
+ stepDownPeriodTooShortCode,
+ ('Expected replSetStepDown to fail given a stepdown time shorter than' +
+ ' secondaryCatchUpPeriodSecs'));
- function disableFailPoint() {
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
- 'Failed to disable rsSyncApplyStop failpoint.');
- }
+jsTestLog('Stop secondary syncing.');
+assert.commandWorked(
+ secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
+ 'Failed to configure rsSyncApplyStop failpoint.');
- // If any of these assertions fail, we need to disable the fail point in order for the mongod to
- // shut down.
- try {
- jsTestLog('Write to primary to make secondary out of sync.');
- assert.writeOK(primary.getDB('test').foo.insert({i: 1}), 'Failed to insert document.');
- sleep(1000);
- // Secondary is now at least 1 second behind.
+function disableFailPoint() {
+ assert.commandWorked(
+ secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
+ 'Failed to disable rsSyncApplyStop failpoint.');
+}
- jsTestLog('Try to step down.');
- var startTime = new Date();
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 1}),
- noCaughtUpSecondariesCode,
- 'Expected replSetStepDown to fail, since no secondaries should be caught up.');
- var endTime = new Date();
+// If any of these assertions fail, we need to disable the fail point in order for the mongod to
+// shut down.
+try {
+ jsTestLog('Write to primary to make secondary out of sync.');
+ assert.writeOK(primary.getDB('test').foo.insert({i: 1}), 'Failed to insert document.');
+ sleep(1000);
+ // Secondary is now at least 1 second behind.
- // Ensure it took at least 1 second to time out. Adjust the timeout a little bit
- // for the precision issue of clock on Windows 2K8.
- assert.lte(0.95,
- (endTime - startTime) / 1000,
- 'Expected replSetStepDown command to fail after 1 second.');
- } catch (err) {
- disableFailPoint();
- throw err;
- }
+ jsTestLog('Try to step down.');
+ var startTime = new Date();
+ assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 1}),
+ noCaughtUpSecondariesCode,
+ 'Expected replSetStepDown to fail, since no secondaries should be caught up.');
+ var endTime = new Date();
+    // Ensure it took at least 1 second to time out. The lower bound is relaxed
+    // slightly to allow for clock-precision issues on Windows 2K8.
+ assert.lte(0.95,
+ (endTime - startTime) / 1000,
+ 'Expected replSetStepDown command to fail after 1 second.');
+} catch (err) {
disableFailPoint();
+ throw err;
+}
+
+disableFailPoint();
- // Make sure the primary hasn't changed, since all stepdowns should have failed.
- var primaryStatus = primary.getDB('admin').runCommand({replSetGetStatus: 1});
- assert.commandWorked(primaryStatus, 'replSetGetStatus failed.');
- assert.eq(primaryStatus.myState,
- ReplSetTest.State.PRIMARY,
- 'Expected original primary node to still be primary');
+// Make sure the primary hasn't changed, since all stepdowns should have failed.
+var primaryStatus = primary.getDB('admin').runCommand({replSetGetStatus: 1});
+assert.commandWorked(primaryStatus, 'replSetGetStatus failed.');
+assert.eq(primaryStatus.myState,
+ ReplSetTest.State.PRIMARY,
+ 'Expected original primary node to still be primary');
- replTest.stopSet();
+replTest.stopSet();
}());
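
A note for readers of the catch-up test above: the two durations passed to replSetStepDown interact in a fixed way. A minimal shell sketch of that contract (illustrative only; assumes `primary` is connected to a healthy replica set with a caught-up, electable secondary):

    var admin = primary.getDB('admin');
    // Rejected up front: the stepdown period (10s) must exceed the
    // catch-up period (20s), so the command never starts waiting.
    assert.commandFailed(
        admin.runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 20}));
    // Accepted: wait up to 5 seconds for an electable secondary to catch up,
    // then relinquish the primary role for the remainder of the 60 seconds.
    assert.commandWorked(
        admin.runCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 5}));
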
diff --git a/jstests/replsets/stepdown_kill_other_ops.js b/jstests/replsets/stepdown_kill_other_ops.js
index 1dc2404b8e4..06fc8de563f 100644
--- a/jstests/replsets/stepdown_kill_other_ops.js
+++ b/jstests/replsets/stepdown_kill_other_ops.js
@@ -1,70 +1,70 @@
// SERVER-15310 Ensure that stepDown kills all other running operations
(function() {
- "use strict";
- var name = "stepdownKillOps";
- var replSet = new ReplSetTest({name: name, nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], "priority": 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
+"use strict";
+var name = "stepdownKillOps";
+var replSet = new ReplSetTest({name: name, nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replSet.getPrimary();
- assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
- assert.writeOK(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout: 10000}));
- replSet.awaitReplication();
+var primary = replSet.getPrimary();
+assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
+assert.writeOK(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout: 10000}));
+replSet.awaitReplication();
- jsTestLog("Sleeping 30 seconds so the SECONDARY will be considered electable");
- sleep(30000);
+jsTestLog("Sleeping 30 seconds so the SECONDARY will be considered electable");
+sleep(30000);
- // Run sleep in a separate thread to take the global write lock which would prevent stepdown
- // from completing if it failed to kill all running operations.
- jsTestLog("Running {sleep:1, lock: 'w'} to grab global write lock");
- var sleepCmd = function() {
- // Run for 10 minutes if not interrupted.
- db.adminCommand({sleep: 1, lock: 'w', seconds: 60 * 10});
- };
- const startTime = new Date().getTime() / 1000;
- var sleepRunner = startParallelShell(sleepCmd, primary.port);
+// Run sleep in a separate thread to take the global write lock which would prevent stepdown
+// from completing if it failed to kill all running operations.
+jsTestLog("Running {sleep:1, lock: 'w'} to grab global write lock");
+var sleepCmd = function() {
+ // Run for 10 minutes if not interrupted.
+ db.adminCommand({sleep: 1, lock: 'w', seconds: 60 * 10});
+};
+const startTime = new Date().getTime() / 1000;
+var sleepRunner = startParallelShell(sleepCmd, primary.port);
- jsTestLog("Confirming that sleep() is running and has the global lock");
- assert.soon(function() {
- var res = primary.getDB('admin').currentOp();
- for (var index in res.inprog) {
- var entry = res.inprog[index];
- if (entry["command"] && entry["command"]["sleep"]) {
- if ("W" === entry["locks"]["Global"]) {
- return true;
- }
+jsTestLog("Confirming that sleep() is running and has the global lock");
+assert.soon(function() {
+ var res = primary.getDB('admin').currentOp();
+ for (var index in res.inprog) {
+ var entry = res.inprog[index];
+ if (entry["command"] && entry["command"]["sleep"]) {
+ if ("W" === entry["locks"]["Global"]) {
+ return true;
}
}
- printjson(res);
- return false;
- }, "sleep never ran and grabbed the global write lock");
+ }
+ printjson(res);
+ return false;
+}, "sleep never ran and grabbed the global write lock");
- jsTestLog("Stepping down");
- assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 30}));
+jsTestLog("Stepping down");
+assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 30}));
- jsTestLog("Waiting for former PRIMARY to become SECONDARY");
- replSet.waitForState(primary, ReplSetTest.State.SECONDARY, 30000);
+jsTestLog("Waiting for former PRIMARY to become SECONDARY");
+replSet.waitForState(primary, ReplSetTest.State.SECONDARY, 30000);
- var newPrimary = replSet.getPrimary();
- assert.neq(primary, newPrimary, "SECONDARY did not become PRIMARY");
+var newPrimary = replSet.getPrimary();
+assert.neq(primary, newPrimary, "SECONDARY did not become PRIMARY");
- sleepRunner({checkExitSuccess: false});
- const endTime = new Date().getTime() / 1000;
- const duration = endTime - startTime;
- assert.lt(duration,
- 60 * 9, // In practice, this should be well under 1 minute.
- "Sleep lock held longer than expected, possibly uninterrupted.");
+sleepRunner({checkExitSuccess: false});
+const endTime = new Date().getTime() / 1000;
+const duration = endTime - startTime;
+assert.lt(duration,
+ 60 * 9, // In practice, this should be well under 1 minute.
+ "Sleep lock held longer than expected, possibly uninterrupted.");
- replSet.stopSet();
+replSet.stopSet();
})();
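
The currentOp() polling above is a recurring pattern in these stepdown tests; factored out, it looks roughly like this (a sketch: `waitForOp` is a hypothetical helper, not one defined in this tree):

    // Poll currentOp() until some in-progress operation satisfies `pred`.
    function waitForOp(conn, pred, msg) {
        assert.soon(function() {
            return conn.getDB('admin').currentOp(true).inprog.some(pred);
        }, msg);
    }
    // Example: wait for a sleep command holding the global write lock ('W').
    waitForOp(primary, function(entry) {
        return entry.command && entry.command.sleep && entry.locks &&
            entry.locks.Global === 'W';
    }, 'sleep never grabbed the global write lock');
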
diff --git a/jstests/replsets/stepdown_killop.js b/jstests/replsets/stepdown_killop.js
index 5c3ac0d33ab..87d7d884a8b 100644
--- a/jstests/replsets/stepdown_killop.js
+++ b/jstests/replsets/stepdown_killop.js
@@ -7,76 +7,76 @@
// 6. Writes should become allowed again and the primary should stay primary.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js");
+load("jstests/libs/write_concern_util.js");
- var name = "interruptStepDown";
- var replSet = new ReplSetTest({name: name, nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], "priority": 0},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
+var name = "interruptStepDown";
+var replSet = new ReplSetTest({name: name, nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], "priority": 0},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- var secondary = replSet.getSecondary();
- jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
- stopServerReplication(secondary);
+var secondary = replSet.getSecondary();
+jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
+stopServerReplication(secondary);
- var primary = replSet.getPrimary();
- assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
+var primary = replSet.getPrimary();
+assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
-    // Do a write, then ask the PRIMARY to step down.
- jsTestLog("Initiating stepdown");
- assert.writeOK(primary.getDB(name).foo.insert(
- {myDoc: true, x: 1}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- var stepDownCmd = function() {
- var res = db.getSiblingDB('admin').runCommand(
- {replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60});
- assert.commandFailedWithCode(res, 11601 /*interrupted*/);
- };
- var stepDowner = startParallelShell(stepDownCmd, primary.port);
- var stepDownOpID = -1;
+// Do a write, then ask the PRIMARY to step down.
+jsTestLog("Initiating stepdown");
+assert.writeOK(primary.getDB(name).foo.insert(
+ {myDoc: true, x: 1}, {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+var stepDownCmd = function() {
+ var res =
+ db.getSiblingDB('admin').runCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60});
+ assert.commandFailedWithCode(res, 11601 /*interrupted*/);
+};
+var stepDowner = startParallelShell(stepDownCmd, primary.port);
+var stepDownOpID = -1;
- jsTestLog("Looking for stepdown in currentOp() output");
- assert.soon(function() {
- var res = primary.getDB('admin').currentOp(true);
- for (var index in res.inprog) {
- var entry = res.inprog[index];
- if (entry["command"] && entry["command"]["replSetStepDown"] === 60) {
- stepDownOpID = entry.opid;
- return true;
- }
+jsTestLog("Looking for stepdown in currentOp() output");
+assert.soon(function() {
+ var res = primary.getDB('admin').currentOp(true);
+ for (var index in res.inprog) {
+ var entry = res.inprog[index];
+ if (entry["command"] && entry["command"]["replSetStepDown"] === 60) {
+ stepDownOpID = entry.opid;
+ return true;
}
- printjson(res);
- return false;
- }, "No pending stepdown command found");
+ }
+ printjson(res);
+ return false;
+}, "No pending stepdown command found");
- jsTestLog("Ensure that writes start failing with NotMaster errors");
- assert.soonNoExcept(function() {
- assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}), ErrorCodes.NotMaster);
- return true;
- });
+jsTestLog("Ensure that writes start failing with NotMaster errors");
+assert.soonNoExcept(function() {
+ assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}), ErrorCodes.NotMaster);
+ return true;
+});
- jsTestLog("Ensure that even though writes are failing with NotMaster, we still report " +
- "ourselves as PRIMARY");
- assert.eq(ReplSetTest.State.PRIMARY, primary.adminCommand('replSetGetStatus').myState);
+jsTestLog("Ensure that even though writes are failing with NotMaster, we still report " +
+ "ourselves as PRIMARY");
+assert.eq(ReplSetTest.State.PRIMARY, primary.adminCommand('replSetGetStatus').myState);
-    // Kill the stepDown and ensure that this unblocks writes to the db.
- jsTestLog("Killing stepdown");
- primary.getDB('admin').killOp(stepDownOpID);
+// Kill the stepDown and ensure that this unblocks writes to the db.
+jsTestLog("Killing stepdown");
+primary.getDB('admin').killOp(stepDownOpID);
- var exitCode = stepDowner();
- assert.eq(0, exitCode);
+var exitCode = stepDowner();
+assert.eq(0, exitCode);
- assert.writeOK(primary.getDB(name).foo.remove({}));
- restartServerReplication(secondary);
- replSet.stopSet();
+assert.writeOK(primary.getDB(name).foo.remove({}));
+restartServerReplication(secondary);
+replSet.stopSet();
})();
diff --git a/jstests/replsets/stepdown_long_wait_time.js b/jstests/replsets/stepdown_long_wait_time.js
index 1d0aab19ac3..5958aa3a86c 100644
--- a/jstests/replsets/stepdown_long_wait_time.js
+++ b/jstests/replsets/stepdown_long_wait_time.js
@@ -7,70 +7,69 @@
// 6. Wait for PRIMARY to StepDown.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js");
+load("jstests/libs/write_concern_util.js");
- var name = "stepDownWithLongWait";
- var replSet = new ReplSetTest({name: name, nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0], "priority": 3},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2], "arbiterOnly": true}
- ]
- });
+var name = "stepDownWithLongWait";
+var replSet = new ReplSetTest({name: name, nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replSet.getPrimary();
+replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY);
+var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
- jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
- stopServerReplication(secondary);
+var secondary = replSet.getSecondary();
+jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
+stopServerReplication(secondary);
- jsTestLog("do a write then ask the PRIMARY to stepdown");
- var options = {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}};
- assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
+jsTestLog("do a write then ask the PRIMARY to stepdown");
+var options = {writeConcern: {w: 1, wtimeout: ReplSetTest.kDefaultTimeoutMS}};
+assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
- var stepDownCmd = function() {
- assert.commandWorked(
- db.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
- };
- var stepDowner = startParallelShell(stepDownCmd, primary.port);
+var stepDownCmd = function() {
+ assert.commandWorked(db.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
+};
+var stepDowner = startParallelShell(stepDownCmd, primary.port);
- assert.soon(function() {
- var res = primary.getDB('admin').currentOp(true);
- for (var entry in res.inprog) {
- if (res.inprog[entry]["command"] &&
- res.inprog[entry]["command"]["replSetStepDown"] === 60) {
- return true;
- }
+assert.soon(function() {
+ var res = primary.getDB('admin').currentOp(true);
+ for (var entry in res.inprog) {
+ if (res.inprog[entry]["command"] &&
+ res.inprog[entry]["command"]["replSetStepDown"] === 60) {
+ return true;
}
- printjson(res);
- return false;
- }, "No pending stepdown command found");
+ }
+ printjson(res);
+ return false;
+}, "No pending stepdown command found");
- jsTestLog("Ensure that writes start failing with NotMaster errors");
- assert.soonNoExcept(function() {
- assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}), ErrorCodes.NotMaster);
- return true;
- });
+jsTestLog("Ensure that writes start failing with NotMaster errors");
+assert.soonNoExcept(function() {
+ assert.commandFailedWithCode(primary.getDB(name).foo.insert({x: 2}), ErrorCodes.NotMaster);
+ return true;
+});
- jsTestLog("Ensure that even though writes are failing with NotMaster, we still report " +
- "ourselves as PRIMARY");
- assert.eq(ReplSetTest.State.PRIMARY, primary.adminCommand('replSetGetStatus').myState);
+jsTestLog("Ensure that even though writes are failing with NotMaster, we still report " +
+ "ourselves as PRIMARY");
+assert.eq(ReplSetTest.State.PRIMARY, primary.adminCommand('replSetGetStatus').myState);
- jsTestLog('Enable replication on the SECONDARY ' + secondary.host);
- restartServerReplication(secondary);
+jsTestLog('Enable replication on the SECONDARY ' + secondary.host);
+restartServerReplication(secondary);
- jsTestLog("Wait for PRIMARY " + primary.host + " to completely step down.");
- replSet.waitForState(primary, ReplSetTest.State.SECONDARY);
- var exitCode = stepDowner();
+jsTestLog("Wait for PRIMARY " + primary.host + " to completely step down.");
+replSet.waitForState(primary, ReplSetTest.State.SECONDARY);
+var exitCode = stepDowner();
- jsTestLog("Wait for SECONDARY " + secondary.host + " to become PRIMARY");
- replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
- replSet.stopSet();
+jsTestLog("Wait for SECONDARY " + secondary.host + " to become PRIMARY");
+replSet.waitForState(secondary, ReplSetTest.State.PRIMARY);
+replSet.stopSet();
})();
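
The stopServerReplication()/restartServerReplication() helpers used above (from write_concern_util.js) boil down to toggling a server failpoint on the secondary, the same configureFailPoint mechanism the earlier tests call directly. The general shape, with a placeholder failpoint name:

    // Turn the failpoint on, run the interesting part of the test, and turn
    // it back off so the node can make progress and shut down cleanly.
    // 'someFailPoint' is a placeholder, not a real failpoint name.
    assert.commandWorked(
        node.adminCommand({configureFailPoint: 'someFailPoint', mode: 'alwaysOn'}));
    try {
        // ... assertions that rely on the failpoint ...
    } finally {
        assert.commandWorked(
            node.adminCommand({configureFailPoint: 'someFailPoint', mode: 'off'}));
    }
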
diff --git a/jstests/replsets/stepdown_needs_electable_secondary.js b/jstests/replsets/stepdown_needs_electable_secondary.js
index 799a2e69a9b..4d2124cc831 100644
--- a/jstests/replsets/stepdown_needs_electable_secondary.js
+++ b/jstests/replsets/stepdown_needs_electable_secondary.js
@@ -20,123 +20,117 @@
*
*/
(function() {
- 'use strict';
-
- load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries,
- // restartServerReplication,
- // restartReplSetReplication
-
- var name = 'stepdown_needs_electable_secondary';
-
- var replTest = new ReplSetTest({name: name, nodes: 5});
- var nodes = replTest.nodeList();
-
- replTest.startSet();
- replTest.initiate({
- "_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2]},
- {"_id": 3, "host": nodes[3], "priority": 0}, // unelectable
- {"_id": 4, "host": nodes[4], "priority": 0} // unelectable
- ],
- "settings": {"chainingAllowed": false}
- });
-
- function assertStepDownFailsWithExceededTimeLimit(node) {
- assert.commandFailedWithCode(
- node.adminCommand({replSetStepDown: 5, secondaryCatchUpPeriodSecs: 5}),
- ErrorCodes.ExceededTimeLimit,
- "step down did not fail with 'ExceededTimeLimit'");
- }
-
- function assertStepDownSucceeds(node) {
- assert.commandWorked(
- node.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
- }
-
- var primary = replTest.getPrimary();
-
- jsTestLog("Blocking writes to all secondaries.");
- stopReplicationOnSecondaries(replTest);
-
- jsTestLog("Doing a write to primary.");
- var testDB = replTest.getPrimary().getDB('testdb');
- var coll = testDB.stepdown_needs_electable_secondary;
- var timeout = ReplSetTest.kDefaultTimeoutMS;
- assert.writeOK(
- coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 1, wtimeout: timeout}}));
-
- // Try to step down with only the primary caught up (1 node out of 5).
- // stepDown should fail.
- jsTestLog("Trying to step down primary with only 1 node out of 5 caught up.");
- assertStepDownFailsWithExceededTimeLimit(primary);
-
- // Get the two unelectable secondaries
- var secondaryB_unelectable = replTest.nodes[3];
- var secondaryC_unelectable = replTest.nodes[4];
-
- // Get an electable secondary
- var secondaryA_electable = replTest.getSecondaries().find(function(s) {
- var nodeId = replTest.getNodeId(s);
- return (nodeId !== 3 && nodeId !== 4); // nodes 3 and 4 are set to be unelectable
- });
-
- // Enable writes to Secondary B (unelectable). Await replication.
- // (2 out of 5 nodes caught up, 0 electable)
- // stepDown should fail due to no caught up majority.
- jsTestLog("Re-enabling writes to unelectable secondary: node #" +
- replTest.getNodeId(secondaryB_unelectable) + ", " + secondaryB_unelectable);
- restartServerReplication(secondaryB_unelectable);
-
- // Wait for this secondary to catch up by issuing a write that must be replicated to 2 nodes
- assert.writeOK(
- coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 2, wtimeout: timeout}}));
-
- // Try to step down and fail
- jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up.");
- assertStepDownFailsWithExceededTimeLimit(primary);
-
- // Enable writes to Secondary C (unelectable). Await replication.
- // (3 out of 5 nodes caught up, 0 electable)
- // stepDown should fail due to caught up majority without electable node.
- jsTestLog("Re-enabling writes to unelectable secondary: node #" +
- replTest.getNodeId(secondaryC_unelectable) + ", " + secondaryC_unelectable);
- restartServerReplication(secondaryC_unelectable);
-
- // Wait for this secondary to catch up by issuing a write that must be replicated to 3 nodes
- assert.writeOK(
- coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 3, wtimeout: timeout}}));
-
- // Try to step down and fail
- jsTestLog("Trying to step down primary with a caught up majority that " +
- "doesn't contain an electable node.");
- assertStepDownFailsWithExceededTimeLimit(primary);
-
- // Enable writes to Secondary A (electable). Await replication.
- // (4 out of 5 nodes caught up, 1 electable)
- // stepDown should succeed due to caught up majority containing an electable node.
- jsTestLog("Re-enabling writes to electable secondary: node #" +
- replTest.getNodeId(secondaryA_electable) + ", " + secondaryA_electable);
- restartServerReplication(secondaryA_electable);
-
- // Wait for this secondary to catch up by issuing a write that must be replicated to 4 nodes
- assert.writeOK(
- coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 4, wtimeout: timeout}}));
-
-    // Try to step down. This time we expect the stepdown to succeed.
- jsTestLog("Trying to step down primary with a caught up majority that " +
- "does contain an electable node.");
-
- assertStepDownSucceeds(primary);
-
- // Make sure that original primary has transitioned to SECONDARY state
- jsTestLog("Wait for PRIMARY " + primary.host + " to completely step down.");
- replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- // Disable all fail points for clean shutdown
- restartReplSetReplication(replTest);
- replTest.stopSet();
-
+'use strict';
+
+load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries,
+ // restartServerReplication,
+ // restartReplSetReplication
+
+var name = 'stepdown_needs_electable_secondary';
+
+var replTest = new ReplSetTest({name: name, nodes: 5});
+var nodes = replTest.nodeList();
+
+replTest.startSet();
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2]},
+ {"_id": 3, "host": nodes[3], "priority": 0}, // unelectable
+ {"_id": 4, "host": nodes[4], "priority": 0} // unelectable
+ ],
+ "settings": {"chainingAllowed": false}
+});
+
+function assertStepDownFailsWithExceededTimeLimit(node) {
+ assert.commandFailedWithCode(
+ node.adminCommand({replSetStepDown: 5, secondaryCatchUpPeriodSecs: 5}),
+ ErrorCodes.ExceededTimeLimit,
+ "step down did not fail with 'ExceededTimeLimit'");
+}
+
+function assertStepDownSucceeds(node) {
+ assert.commandWorked(node.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
+}
+
+var primary = replTest.getPrimary();
+
+jsTestLog("Blocking writes to all secondaries.");
+stopReplicationOnSecondaries(replTest);
+
+jsTestLog("Doing a write to primary.");
+var testDB = replTest.getPrimary().getDB('testdb');
+var coll = testDB.stepdown_needs_electable_secondary;
+var timeout = ReplSetTest.kDefaultTimeoutMS;
+assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 1, wtimeout: timeout}}));
+
+// Try to step down with only the primary caught up (1 node out of 5).
+// stepDown should fail.
+jsTestLog("Trying to step down primary with only 1 node out of 5 caught up.");
+assertStepDownFailsWithExceededTimeLimit(primary);
+
+// Get the two unelectable secondaries
+var secondaryB_unelectable = replTest.nodes[3];
+var secondaryC_unelectable = replTest.nodes[4];
+
+// Get an electable secondary
+var secondaryA_electable = replTest.getSecondaries().find(function(s) {
+ var nodeId = replTest.getNodeId(s);
+ return (nodeId !== 3 && nodeId !== 4); // nodes 3 and 4 are set to be unelectable
+});
+
+// Enable writes to Secondary B (unelectable). Await replication.
+// (2 out of 5 nodes caught up, 0 electable)
+// stepDown should fail due to no caught up majority.
+jsTestLog("Re-enabling writes to unelectable secondary: node #" +
+ replTest.getNodeId(secondaryB_unelectable) + ", " + secondaryB_unelectable);
+restartServerReplication(secondaryB_unelectable);
+
+// Wait for this secondary to catch up by issuing a write that must be replicated to 2 nodes
+assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 2, wtimeout: timeout}}));
+
+// Try to step down and fail
+jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up.");
+assertStepDownFailsWithExceededTimeLimit(primary);
+
+// Enable writes to Secondary C (unelectable). Await replication.
+// (3 out of 5 nodes caught up, 0 electable)
+// stepDown should fail due to caught up majority without electable node.
+jsTestLog("Re-enabling writes to unelectable secondary: node #" +
+ replTest.getNodeId(secondaryC_unelectable) + ", " + secondaryC_unelectable);
+restartServerReplication(secondaryC_unelectable);
+
+// Wait for this secondary to catch up by issuing a write that must be replicated to 3 nodes
+assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 3, wtimeout: timeout}}));
+
+// Try to step down and fail
+jsTestLog("Trying to step down primary with a caught up majority that " +
+ "doesn't contain an electable node.");
+assertStepDownFailsWithExceededTimeLimit(primary);
+
+// Enable writes to Secondary A (electable). Await replication.
+// (4 out of 5 nodes caught up, 1 electable)
+// stepDown should succeed due to caught up majority containing an electable node.
+jsTestLog("Re-enabling writes to electable secondary: node #" +
+ replTest.getNodeId(secondaryA_electable) + ", " + secondaryA_electable);
+restartServerReplication(secondaryA_electable);
+
+// Wait for this secondary to catch up by issuing a write that must be replicated to 4 nodes
+assert.writeOK(coll.insert({"dummy_key": "dummy_val"}, {writeConcern: {w: 4, wtimeout: timeout}}));
+
+// Try to step down. This time we expect the stepdown to succeed.
+jsTestLog("Trying to step down primary with a caught up majority that " +
+ "does contain an electable node.");
+
+assertStepDownSucceeds(primary);
+
+// Make sure that original primary has transitioned to SECONDARY state
+jsTestLog("Wait for PRIMARY " + primary.host + " to completely step down.");
+replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+// Disable all fail points for clean shutdown
+restartReplSetReplication(replTest);
+replTest.stopSet();
}());
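
A side note on the w:2 / w:3 / w:4 inserts above: a write with writeConcern {w: n} only acknowledges once n members (counting the primary) have replicated it, so these dummy inserts double as catch-up barriers. As a reusable sketch (`awaitMembersCaughtUp` is hypothetical):

    // Block until `n` members have replicated everything up to and including
    // this dummy write, or fail once the write concern times out.
    function awaitMembersCaughtUp(coll, n) {
        assert.writeOK(coll.insert(
            {"dummy_key": "dummy_val"},
            {writeConcern: {w: n, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
    }
    awaitMembersCaughtUp(coll, 3);  // e.g. primary plus two secondaries
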
diff --git a/jstests/replsets/stepdown_needs_majority.js b/jstests/replsets/stepdown_needs_majority.js
index efc874fde3a..cb465fb3f30 100644
--- a/jstests/replsets/stepdown_needs_majority.js
+++ b/jstests/replsets/stepdown_needs_majority.js
@@ -16,92 +16,90 @@
*
*/
(function() {
- 'use strict';
-
-    load("jstests/libs/write_concern_util.js");  // for stopReplicationOnSecondaries,
- // restartServerReplication,
- // restartReplSetReplication
-
- function assertStepDownFailsWithExceededTimeLimit(node) {
- assert.commandFailedWithCode(
- node.adminCommand({replSetStepDown: 5, secondaryCatchUpPeriodSecs: 5}),
- ErrorCodes.ExceededTimeLimit,
- "step down did not fail with 'ExceededTimeLimit'");
- }
-
- function assertStepDownSucceeds(node) {
- assert.commandWorked(
- node.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
- }
-
- function nodeIdStr(repltest, node) {
- return "node #" + repltest.getNodeId(node) + ", " + node.host;
- }
-
- //
- // Test setup
- //
- var name = 'stepdown_needs_majority';
- var replTest = new ReplSetTest({name: name, nodes: 5, settings: {chainingAllowed: false}});
-
- replTest.startSet();
- replTest.initiate();
-
- var primary = replTest.getPrimary();
- var testDB = primary.getDB('testdb');
- var coll = testDB[name];
- var dummy_doc = {"dummy_key": "dummy_val"};
- var timeout = ReplSetTest.kDefaultTimeoutMS;
-
- //
- // Block writes to all secondaries
- //
- jsTestLog("Blocking writes to all secondaries.");
- stopReplicationOnSecondaries(replTest);
-
- //
- // Write to the primary and attempt stepdown
- //
- jsTestLog("Issuing a write to the primary(" + primary.host + ") with write_concern:1");
- assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 1, wtimeout: timeout}}));
-
- jsTestLog("Trying to step down primary with only 1 node out of 5 caught up.");
- assertStepDownFailsWithExceededTimeLimit(primary);
-
- //
- // Re-enable writes to Secondary A and attempt stepdown
- //
- var secondaryA = replTest.getSecondaries()[0];
- jsTestLog("Reenabling writes to one secondary (" + nodeIdStr(replTest, secondaryA) + ")");
- restartServerReplication(secondaryA);
-
- jsTestLog("Issuing a write to the primary with write_concern:2");
- assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 2, wtimeout: timeout}}));
-
- jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up.");
- assertStepDownFailsWithExceededTimeLimit(primary);
-
- //
- // Re-enable writes to Secondary B and attempt stepdown
- //
- var secondaryB = replTest.getSecondaries()[1];
- jsTestLog("Reenabling writes to another secondary (" + nodeIdStr(replTest, secondaryB) + ")");
- restartServerReplication(secondaryB);
-
- jsTestLog("Issuing a write to the primary with write_concern:3");
- assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 3, wtimeout: timeout}}));
-
- jsTestLog("Trying to step down primary with 3 nodes out of 5 caught up.");
- assertStepDownSucceeds(primary);
-
- jsTestLog("Waiting for PRIMARY(" + primary.host + ") to step down & become SECONDARY.");
- replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- //
- // Disable failpoints and stop replica set
- //
- jsTestLog("Disabling all fail points to allow for clean shutdown");
- restartReplSetReplication(replTest);
- replTest.stopSet();
-
+'use strict';
+
+load("jstests/libs/write_concern_util.js"); // for stopReplicationOnSecondaries, //
+ // restartServerReplication,
+ // restartReplSetReplication
+
+function assertStepDownFailsWithExceededTimeLimit(node) {
+ assert.commandFailedWithCode(
+ node.adminCommand({replSetStepDown: 5, secondaryCatchUpPeriodSecs: 5}),
+ ErrorCodes.ExceededTimeLimit,
+ "step down did not fail with 'ExceededTimeLimit'");
+}
+
+function assertStepDownSucceeds(node) {
+ assert.commandWorked(node.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60}));
+}
+
+function nodeIdStr(repltest, node) {
+ return "node #" + repltest.getNodeId(node) + ", " + node.host;
+}
+
+//
+// Test setup
+//
+var name = 'stepdown_needs_majority';
+var replTest = new ReplSetTest({name: name, nodes: 5, settings: {chainingAllowed: false}});
+
+replTest.startSet();
+replTest.initiate();
+
+var primary = replTest.getPrimary();
+var testDB = primary.getDB('testdb');
+var coll = testDB[name];
+var dummy_doc = {"dummy_key": "dummy_val"};
+var timeout = ReplSetTest.kDefaultTimeoutMS;
+
+//
+// Block writes to all secondaries
+//
+jsTestLog("Blocking writes to all secondaries.");
+stopReplicationOnSecondaries(replTest);
+
+//
+// Write to the primary and attempt stepdown
+//
+jsTestLog("Issuing a write to the primary(" + primary.host + ") with write_concern:1");
+assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 1, wtimeout: timeout}}));
+
+jsTestLog("Trying to step down primary with only 1 node out of 5 caught up.");
+assertStepDownFailsWithExceededTimeLimit(primary);
+
+//
+// Re-enable writes to Secondary A and attempt stepdown
+//
+var secondaryA = replTest.getSecondaries()[0];
+jsTestLog("Reenabling writes to one secondary (" + nodeIdStr(replTest, secondaryA) + ")");
+restartServerReplication(secondaryA);
+
+jsTestLog("Issuing a write to the primary with write_concern:2");
+assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 2, wtimeout: timeout}}));
+
+jsTestLog("Trying to step down primary with only 2 nodes out of 5 caught up.");
+assertStepDownFailsWithExceededTimeLimit(primary);
+
+//
+// Re-enable writes to Secondary B and attempt stepdown
+//
+var secondaryB = replTest.getSecondaries()[1];
+jsTestLog("Reenabling writes to another secondary (" + nodeIdStr(replTest, secondaryB) + ")");
+restartServerReplication(secondaryB);
+
+jsTestLog("Issuing a write to the primary with write_concern:3");
+assert.writeOK(coll.insert(dummy_doc, {writeConcern: {w: 3, wtimeout: timeout}}));
+
+jsTestLog("Trying to step down primary with 3 nodes out of 5 caught up.");
+assertStepDownSucceeds(primary);
+
+jsTestLog("Waiting for PRIMARY(" + primary.host + ") to step down & become SECONDARY.");
+replTest.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+//
+// Disable failpoints and stop replica set
+//
+jsTestLog("Disabling all fail points to allow for clean shutdown");
+restartReplSetReplication(replTest);
+replTest.stopSet();
}());
diff --git a/jstests/replsets/stepup.js b/jstests/replsets/stepup.js
index 4c5d20d42c4..d4ce932a5bc 100644
--- a/jstests/replsets/stepup.js
+++ b/jstests/replsets/stepup.js
@@ -4,70 +4,68 @@ load("jstests/replsets/rslib.js");
load('jstests/replsets/libs/election_metrics.js');
(function() {
- "use strict";
- var name = "stepup";
- var rst = new ReplSetTest({name: name, nodes: 2});
+"use strict";
+var name = "stepup";
+var rst = new ReplSetTest({name: name, nodes: 2});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- var primary = rst.getPrimary();
- var secondary = rst.getSecondary();
+var primary = rst.getPrimary();
+var secondary = rst.getSecondary();
- const initialSecondaryStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
+const initialSecondaryStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
-    // Step up the primary. This returns OK because it is already the primary.
- var res = primary.adminCommand({replSetStepUp: 1});
- assert.commandWorked(res);
- assert.eq(primary, rst.getPrimary());
+// Step up the primary. This returns OK because it is already the primary.
+var res = primary.adminCommand({replSetStepUp: 1});
+assert.commandWorked(res);
+assert.eq(primary, rst.getPrimary());
- // Step up the secondary, but it's not eligible to be primary.
- // Enable fail point on secondary.
- assert.commandWorked(secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
+// Step up the secondary, but it's not eligible to be primary.
+// Enable fail point on secondary.
+assert.commandWorked(
+ secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}));
- assert.writeOK(primary.getDB("test").bar.insert({x: 2}, {writeConcern: {w: 1}}));
- res = secondary.adminCommand({replSetStepUp: 1});
- assert.commandFailedWithCode(res, ErrorCodes.CommandFailed);
- assert.commandWorked(
- secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
+assert.writeOK(primary.getDB("test").bar.insert({x: 2}, {writeConcern: {w: 1}}));
+res = secondary.adminCommand({replSetStepUp: 1});
+assert.commandFailedWithCode(res, ErrorCodes.CommandFailed);
+assert.commandWorked(
+ secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}));
- // Wait for the secondary to catch up by replicating a doc to both nodes.
- assert.writeOK(primary.getDB("test").bar.insert({x: 3}, {writeConcern: {w: "majority"}}));
+// Wait for the secondary to catch up by replicating a doc to both nodes.
+assert.writeOK(primary.getDB("test").bar.insert({x: 3}, {writeConcern: {w: "majority"}}));
- // Step up the secondary. Retry since the old primary may step down when we try to ask for its
- // vote.
- assert.soonNoExcept(function() {
- return secondary.adminCommand({replSetStepUp: 1}).ok;
- });
+// Step up the secondary. Retry since the old primary may step down when we try to ask for its
+// vote.
+assert.soonNoExcept(function() {
+ return secondary.adminCommand({replSetStepUp: 1}).ok;
+});
- // Make sure the step up succeeded.
- assert.eq(secondary, rst.getPrimary());
+// Make sure the step up succeeded.
+assert.eq(secondary, rst.getPrimary());
- const newSecondaryStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
+const newSecondaryStatus = assert.commandWorked(secondary.adminCommand({serverStatus: 1}));
- // Check that both the 'called' and 'successful' fields of stepUpCmd have been incremented in
- // serverStatus, and that they have not been incremented in any of the other election reason
- // counters.
- verifyServerStatusElectionReasonCounterChange(
- initialSecondaryStatus.electionMetrics, newSecondaryStatus.electionMetrics, "stepUpCmd", 1);
- verifyServerStatusElectionReasonCounterChange(initialSecondaryStatus.electionMetrics,
- newSecondaryStatus.electionMetrics,
- "priorityTakeover",
- 0);
- verifyServerStatusElectionReasonCounterChange(initialSecondaryStatus.electionMetrics,
- newSecondaryStatus.electionMetrics,
- "catchUpTakeover",
- 0);
- verifyServerStatusElectionReasonCounterChange(initialSecondaryStatus.electionMetrics,
- newSecondaryStatus.electionMetrics,
- "electionTimeout",
- 0);
- verifyServerStatusElectionReasonCounterChange(initialSecondaryStatus.electionMetrics,
- newSecondaryStatus.electionMetrics,
- "freezeTimeout",
- 0);
+// Check that both the 'called' and 'successful' fields of stepUpCmd have been incremented in
+// serverStatus, and that they have not been incremented in any of the other election reason
+// counters.
+verifyServerStatusElectionReasonCounterChange(
+ initialSecondaryStatus.electionMetrics, newSecondaryStatus.electionMetrics, "stepUpCmd", 1);
+verifyServerStatusElectionReasonCounterChange(initialSecondaryStatus.electionMetrics,
+ newSecondaryStatus.electionMetrics,
+ "priorityTakeover",
+ 0);
+verifyServerStatusElectionReasonCounterChange(initialSecondaryStatus.electionMetrics,
+ newSecondaryStatus.electionMetrics,
+ "catchUpTakeover",
+ 0);
+verifyServerStatusElectionReasonCounterChange(initialSecondaryStatus.electionMetrics,
+ newSecondaryStatus.electionMetrics,
+ "electionTimeout",
+ 0);
+verifyServerStatusElectionReasonCounterChange(
+ initialSecondaryStatus.electionMetrics, newSecondaryStatus.electionMetrics, "freezeTimeout", 0);
- rst.stopSet();
+rst.stopSet();
})();
diff --git a/jstests/replsets/storage_commit_out_of_order.js b/jstests/replsets/storage_commit_out_of_order.js
index f6a65ebae97..7d96ae4c235 100644
--- a/jstests/replsets/storage_commit_out_of_order.js
+++ b/jstests/replsets/storage_commit_out_of_order.js
@@ -8,67 +8,67 @@
* is released after a few seconds and asserts that its write concern can be satisfied.
*/
(function() {
- 'use strict';
+'use strict';
- load('jstests/libs/parallelTester.js');
+load('jstests/libs/parallelTester.js');
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const dbName = 'storage_commit_out_of_order';
- const collName = 'foo';
- const numThreads = 2;
- const primary = rst.getPrimary();
- const coll = primary.getDB(dbName).getCollection(collName);
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const dbName = 'storage_commit_out_of_order';
+const collName = 'foo';
+const numThreads = 2;
+const primary = rst.getPrimary();
+const coll = primary.getDB(dbName).getCollection(collName);
- /**
- * Waits for the provided latch to reach 0 and then does a single w:majority insert.
- */
- const majorityInsert = function(num, host, dbName, collName, latch) {
- const m = new Mongo(host);
- latch.countDown();
- while (latch.getCount() > 0) {
- // do nothing
- }
- return m.getDB(dbName).runCommand({
- insert: collName,
- documents: [{b: num}],
- writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}
- });
- };
+/**
+ * Waits for the provided latch to reach 0 and then does a single w:majority insert.
+ */
+const majorityInsert = function(num, host, dbName, collName, latch) {
+ const m = new Mongo(host);
+ latch.countDown();
+ while (latch.getCount() > 0) {
+ // do nothing
+ }
+ return m.getDB(dbName).runCommand({
+ insert: collName,
+ documents: [{b: num}],
+ writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}
+ });
+};
- assert.commandWorked(primary.setLogLevel(2, 'replication'));
- assert.commandWorked(coll.insert(
- {a: 1}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+assert.commandWorked(primary.setLogLevel(2, 'replication'));
+assert.commandWorked(
+ coll.insert({a: 1}, {writeConcern: {w: 'majority', wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
-    // Turn on a fail point to force the first thread that receives an optime from the
-    // optime generator to wait a few seconds before storage-committing the insert.
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: 'sleepBetweenInsertOpTimeGenerationAndLogOp',
- mode: {times: 1},
- data: {waitForMillis: 3000}
- }));
+// Turn on a fail point to force the first thread that receives an optime from the optime
+// generator to wait a few seconds before storage-committing the insert.
+assert.commandWorked(primary.adminCommand({
+ configureFailPoint: 'sleepBetweenInsertOpTimeGenerationAndLogOp',
+ mode: {times: 1},
+ data: {waitForMillis: 3000}
+}));
- // Start a bunch of threads. They will block waiting on the latch to hit 0.
- const t = [];
- const counter = new CountDownLatch(numThreads + 1);
- for (let i = 0; i < numThreads; ++i) {
- t[i] = new ScopedThread(majorityInsert, i, coll.getMongo().host, dbName, collName, counter);
- t[i].start();
- }
+// Start a bunch of threads. They will block waiting on the latch to hit 0.
+const t = [];
+const counter = new CountDownLatch(numThreads + 1);
+for (let i = 0; i < numThreads; ++i) {
+ t[i] = new ScopedThread(majorityInsert, i, coll.getMongo().host, dbName, collName, counter);
+ t[i].start();
+}
- // Release the threads with the latch once they are all blocked on it.
- jsTestLog('All threads started.');
- assert.soon(() => counter.getCount() === 1);
- jsTestLog('All threads at barrier.');
- counter.countDown();
- jsTestLog('All threads finishing.');
+// Release the threads with the latch once they are all blocked on it.
+jsTestLog('All threads started.');
+assert.soon(() => counter.getCount() === 1);
+jsTestLog('All threads at barrier.');
+counter.countDown();
+jsTestLog('All threads finishing.');
- // Wait for all threads to complete and ensure they succeeded.
- for (let i = 0; i < numThreads; ++i) {
- t[i].join();
- assert.commandWorked(t[i].returnData());
- }
+// Wait for all threads to complete and ensure they succeeded.
+for (let i = 0; i < numThreads; ++i) {
+ t[i].join();
+ assert.commandWorked(t[i].returnData());
+}
- rst.stopSet();
+rst.stopSet();
}());
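
The CountDownLatch/ScopedThread rendezvous in the test above is a useful pattern for lining up concurrent shell work; trimmed down (same parallelTester helpers, `primary` assumed in scope):

    load('jstests/libs/parallelTester.js');
    const nWorkers = 2;
    // One latch slot per worker plus one held by the coordinating thread.
    const latch = new CountDownLatch(nWorkers + 1);
    const worker = function(host, latch) {
        const conn = new Mongo(host);
        latch.countDown();
        while (latch.getCount() > 0) {
            // Spin until every participant has checked in.
        }
        return conn.getDB('test').runCommand({ping: 1});
    };
    const threads = [];
    for (let i = 0; i < nWorkers; ++i) {
        threads[i] = new ScopedThread(worker, primary.host, latch);
        threads[i].start();
    }
    assert.soon(() => latch.getCount() === 1);  // every worker is parked
    latch.countDown();                          // release them all at once
    threads.forEach(function(t) {
        t.join();
        assert.commandWorked(t.returnData());
    });
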
diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js
index 79a1b48fa68..4a1053b04bb 100644
--- a/jstests/replsets/sync2.js
+++ b/jstests/replsets/sync2.js
@@ -2,50 +2,50 @@
// are disconnected from their current sync source.
(function() {
- 'use strict';
-
- var replTest = new ReplSetTest({
- name: 'sync2',
- nodes: [{rsConfig: {priority: 5}}, {arbiter: true}, {}, {}, {}],
- useBridge: true
- });
- var conns = replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- jsTestLog("Replica set test initialized");
-
- master.getDB("foo").bar.insert({x: 1});
- replTest.awaitReplication();
-
- conns[0].disconnect(conns[4]);
- conns[1].disconnect(conns[2]);
- conns[2].disconnect(conns[3]);
- conns[3].disconnect(conns[1]);
-
- // 4 is connected to 2
- conns[4].disconnect(conns[1]);
- conns[4].disconnect(conns[3]);
-
- assert.soon(function() {
- master = replTest.getPrimary();
- return master === conns[0];
- }, "node 0 should become primary before timeout", replTest.kDefaultTimeoutMS);
-
- replTest.awaitReplication();
- jsTestLog("Checking that ops still replicate correctly");
- var option = {writeConcern: {w: conns.length - 1, wtimeout: replTest.kDefaultTimeoutMS}};
-    // In PV0, this write can fail as a result of a bad spanning tree. If 2 was syncing from 4
-    // prior to bridging, it will not change sync sources, and so will not receive the write in
-    // time. This was not a problem in 3.0 because the old version of mongobridge caused all the
-    // nodes to restart during partitioning, forcing the set to rebuild the spanning tree.
- assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
-
- // 4 is connected to 3
- conns[4].disconnect(conns[2]);
- conns[4].reconnect(conns[3]);
-
- assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
-
- replTest.stopSet();
+'use strict';
+
+var replTest = new ReplSetTest({
+ name: 'sync2',
+ nodes: [{rsConfig: {priority: 5}}, {arbiter: true}, {}, {}, {}],
+ useBridge: true
+});
+var conns = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+jsTestLog("Replica set test initialized");
+
+master.getDB("foo").bar.insert({x: 1});
+replTest.awaitReplication();
+
+conns[0].disconnect(conns[4]);
+conns[1].disconnect(conns[2]);
+conns[2].disconnect(conns[3]);
+conns[3].disconnect(conns[1]);
+
+// 4 is connected to 2
+conns[4].disconnect(conns[1]);
+conns[4].disconnect(conns[3]);
+
+assert.soon(function() {
+ master = replTest.getPrimary();
+ return master === conns[0];
+}, "node 0 should become primary before timeout", replTest.kDefaultTimeoutMS);
+
+replTest.awaitReplication();
+jsTestLog("Checking that ops still replicate correctly");
+var option = {writeConcern: {w: conns.length - 1, wtimeout: replTest.kDefaultTimeoutMS}};
+// In PV0, this write can fail as a result of a bad spanning tree. If 2 was syncing from 4 prior
+// to bridging, it will not change sync sources, and so will not receive the write in time. This
+// was not a problem in 3.0 because the old version of mongobridge caused all the nodes to restart
+// during partitioning, forcing the set to rebuild the spanning tree.
+assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
+
+// 4 is connected to 3
+conns[4].disconnect(conns[2]);
+conns[4].reconnect(conns[3]);
+
+assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
+
+replTest.stopSet();
}());
diff --git a/jstests/replsets/system_profile.js b/jstests/replsets/system_profile.js
index 7e1250da6dd..4e525d4fc93 100644
--- a/jstests/replsets/system_profile.js
+++ b/jstests/replsets/system_profile.js
@@ -2,48 +2,47 @@
// to the secondary.
(function() {
- "use strict";
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+"use strict";
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- // filter out noop writes
- var getLatestOp = function() {
- return primaryDB.getSiblingDB('local')
- .oplog.rs.find({op: {$ne: 'n'}})
- .sort({$natural: -1})
- .limit(1)
- .next();
- };
+// filter out noop writes
+var getLatestOp = function() {
+ return primaryDB.getSiblingDB('local')
+ .oplog.rs.find({op: {$ne: 'n'}})
+ .sort({$natural: -1})
+ .limit(1)
+ .next();
+};
- var primaryDB = rst.getPrimary().getDB('test');
- assert.writeOK(primaryDB.foo.insert({}));
- var op = getLatestOp();
+var primaryDB = rst.getPrimary().getDB('test');
+assert.writeOK(primaryDB.foo.insert({}));
+var op = getLatestOp();
- // Enable profiling on the primary
- assert.commandWorked(primaryDB.runCommand({profile: 2}));
- assert.eq(op, getLatestOp(), "oplog entry created when profile was enabled");
- assert.writeOK(primaryDB.foo.insert({}));
- op = getLatestOp();
- assert.commandWorked(primaryDB.runCommand({profile: 0}));
- assert.eq(op, getLatestOp(), "oplog entry created when profile was disabled");
+// Enable profiling on the primary
+assert.commandWorked(primaryDB.runCommand({profile: 2}));
+assert.eq(op, getLatestOp(), "oplog entry created when profile was enabled");
+assert.writeOK(primaryDB.foo.insert({}));
+op = getLatestOp();
+assert.commandWorked(primaryDB.runCommand({profile: 0}));
+assert.eq(op, getLatestOp(), "oplog entry created when profile was disabled");
- // dropCollection
- assert(primaryDB.system.profile.drop());
- assert.eq(op, getLatestOp(), "oplog entry created when system.profile was dropped");
+// dropCollection
+assert(primaryDB.system.profile.drop());
+assert.eq(op, getLatestOp(), "oplog entry created when system.profile was dropped");
- assert.commandWorked(primaryDB.createCollection("system.profile", {capped: true, size: 1000}));
- assert.eq(op, getLatestOp(), "oplog entry created when system.profile was created");
- assert.commandWorked(primaryDB.runCommand({profile: 2}));
- assert.writeOK(primaryDB.foo.insert({}));
- op = getLatestOp();
- assert.commandWorked(primaryDB.runCommand({profile: 0}));
+assert.commandWorked(primaryDB.createCollection("system.profile", {capped: true, size: 1000}));
+assert.eq(op, getLatestOp(), "oplog entry created when system.profile was created");
+assert.commandWorked(primaryDB.runCommand({profile: 2}));
+assert.writeOK(primaryDB.foo.insert({}));
+op = getLatestOp();
+assert.commandWorked(primaryDB.runCommand({profile: 0}));
-    // Empty the collection using the 'emptycapped' command
- assert.commandWorked(primaryDB.runCommand({emptycapped: "system.profile"}));
- assert.eq(
- op, getLatestOp(), "oplog entry created when system.profile was emptied via emptycapped");
- assert(primaryDB.system.profile.drop());
- rst.stopSet();
+// Empty the collection using the 'emptycapped' command
+assert.commandWorked(primaryDB.runCommand({emptycapped: "system.profile"}));
+assert.eq(op, getLatestOp(), "oplog entry created when system.profile was emptied via emptycapped");
+assert(primaryDB.system.profile.drop());
+rst.stopSet();
})();
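
For orientation, the profile command used above takes the standard levels: 0 is off, 1 captures only slow operations, and 2 captures every operation into the capped, node-local system.profile collection, which is exactly why the test expects no oplog entries from any of it. A quick sketch against any test database:

    // Profile everything, run one query, inspect the newest profile entry,
    // then switch profiling back off. None of this is replicated.
    assert.commandWorked(testDB.runCommand({profile: 2}));
    testDB.foo.find({}).itcount();
    printjson(testDB.system.profile.find().sort({$natural: -1}).limit(1).next());
    assert.commandWorked(testDB.runCommand({profile: 0}));
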
diff --git a/jstests/replsets/system_profile_secondary.js b/jstests/replsets/system_profile_secondary.js
index 7c62e126323..954ec0bf523 100644
--- a/jstests/replsets/system_profile_secondary.js
+++ b/jstests/replsets/system_profile_secondary.js
@@ -1,26 +1,24 @@
// This tests that we can successfully profile queries on secondaries.
(function() {
- 'use strict';
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- rst.initiate();
- rst.awaitReplication();
+'use strict';
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+rst.initiate();
+rst.awaitReplication();
- var secondaryDB = rst.getSecondary().getDB('test');
+var secondaryDB = rst.getSecondary().getDB('test');
- jsTestLog('Enable profiling on the secondary');
- assert.commandWorked(secondaryDB.runCommand({profile: 2}));
+jsTestLog('Enable profiling on the secondary');
+assert.commandWorked(secondaryDB.runCommand({profile: 2}));
- jsTestLog('Perform a query that returns no results, but will get profiled.');
- secondaryDB.doesntexist.find({}).itcount();
+jsTestLog('Perform a query that returns no results, but will get profiled.');
+secondaryDB.doesntexist.find({}).itcount();
- let numProfileEntries = (coll) =>
- coll.getDB()
- .system.profile.find({op: 'query', ns: coll.getFullName(), nreturned: 0})
- .itcount();
+let numProfileEntries = (coll) =>
+ coll.getDB().system.profile.find({op: 'query', ns: coll.getFullName(), nreturned: 0}).itcount();
- jsTestLog('Check the query is in the profile and turn profiling off.');
- assert.eq(numProfileEntries(secondaryDB.doesntexist), 1, 'expected a single profile entry');
- assert.commandWorked(secondaryDB.runCommand({profile: 0}));
- rst.stopSet();
+jsTestLog('Check the query is in the profile and turn profiling off.');
+assert.eq(numProfileEntries(secondaryDB.doesntexist), 1, 'expected a single profile entry');
+assert.commandWorked(secondaryDB.runCommand({profile: 0}));
+rst.stopSet();
})();
diff --git a/jstests/replsets/tags.js b/jstests/replsets/tags.js
index 78ab1e50588..1c753988af8 100644
--- a/jstests/replsets/tags.js
+++ b/jstests/replsets/tags.js
@@ -1,8 +1,8 @@
(function() {
- 'use strict';
+'use strict';
- load("jstests/replsets/libs/tags.js");
+load("jstests/replsets/libs/tags.js");
- let nodes = [{}, {}, {}, {}, {}];
- new TagsTest({nodes: nodes}).run();
+let nodes = [{}, {}, {}, {}, {}];
+new TagsTest({nodes: nodes}).run();
}());
diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js
index 7ee2fe81031..361b6204c08 100644
--- a/jstests/replsets/tags2.js
+++ b/jstests/replsets/tags2.js
@@ -1,60 +1,60 @@
// Change a write concern mode from 2 to 3 servers
(function() {
- "use strict";
-
- var host = getHostName();
- var replTest = new ReplSetTest({nodes: 4});
- var nodes = replTest.startSet();
- var ports = replTest.ports;
- var conf = {
- _id: replTest.name,
- members: [
- {_id: 0, host: host + ":" + ports[0], tags: {"backup": "A"}},
- {_id: 1, host: host + ":" + ports[1], tags: {"backup": "B"}},
- {_id: 2, host: host + ":" + ports[2], tags: {"backup": "C"}},
- {_id: 3, host: host + ":" + ports[3], tags: {"backup": "D"}, arbiterOnly: true}
- ],
- settings: {getLastErrorModes: {backedUp: {backup: 2}}}
- };
-
- print("arbiters can't have tags");
- var result = nodes[0].getDB("admin").runCommand({replSetInitiate: conf});
- printjson(result);
- assert.eq(result.ok, 0);
-
- conf.members.pop();
- replTest.stop(3);
- replTest.remove(3);
- replTest.initiate(conf);
-
- replTest.awaitReplication();
-
- var master = replTest.getPrimary();
- var db = master.getDB("test");
- var wtimeout = ReplSetTest.kDefaultTimeoutMS;
-
- assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
-
- var nextVersion = replTest.getReplSetConfigFromNode().version + 1;
- conf.version = nextVersion;
- conf.settings.getLastErrorModes.backedUp.backup = 3;
- master.getDB("admin").runCommand({replSetReconfig: conf});
- replTest.awaitReplication();
-
- master = replTest.getPrimary();
- var db = master.getDB("test");
- assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
-
- nextVersion++;
- conf.version = nextVersion;
-    conf.members[0].priority = 3;
-    conf.members[2].priority = 0;
- master.getDB("admin").runCommand({replSetReconfig: conf});
-
- master = replTest.getPrimary();
- var db = master.getDB("test");
- assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
-
- replTest.stopSet();
+"use strict";
+
+var host = getHostName();
+var replTest = new ReplSetTest({nodes: 4});
+var nodes = replTest.startSet();
+var ports = replTest.ports;
+var conf = {
+ _id: replTest.name,
+ members: [
+ {_id: 0, host: host + ":" + ports[0], tags: {"backup": "A"}},
+ {_id: 1, host: host + ":" + ports[1], tags: {"backup": "B"}},
+ {_id: 2, host: host + ":" + ports[2], tags: {"backup": "C"}},
+ {_id: 3, host: host + ":" + ports[3], tags: {"backup": "D"}, arbiterOnly: true}
+ ],
+ settings: {getLastErrorModes: {backedUp: {backup: 2}}}
+};
+
+print("arbiters can't have tags");
+var result = nodes[0].getDB("admin").runCommand({replSetInitiate: conf});
+printjson(result);
+assert.eq(result.ok, 0);
+
+conf.members.pop();
+replTest.stop(3);
+replTest.remove(3);
+replTest.initiate(conf);
+
+replTest.awaitReplication();
+
+var master = replTest.getPrimary();
+var db = master.getDB("test");
+var wtimeout = ReplSetTest.kDefaultTimeoutMS;
+
+assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
+
+var nextVersion = replTest.getReplSetConfigFromNode().version + 1;
+conf.version = nextVersion;
+conf.settings.getLastErrorModes.backedUp.backup = 3;
+master.getDB("admin").runCommand({replSetReconfig: conf});
+replTest.awaitReplication();
+
+master = replTest.getPrimary();
+var db = master.getDB("test");
+assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
+
+nextVersion++;
+conf.version = nextVersion;
+conf.members[0].priority = 3;
+conf.members[2].priority = 0;
+master.getDB("admin").runCommand({replSetReconfig: conf});
+
+master = replTest.getPrimary();
+var db = master.getDB("test");
+assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: wtimeout}}));
+
+replTest.stopSet();
}());
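
For readers unfamiliar with getLastErrorModes: the custom 'backedUp' mode above is satisfied once members covering the required number of distinct 'backup' tag values acknowledge the write. A minimal standalone config sketch with hypothetical hosts:

    var conf = {
        _id: 'rs0',
        members: [
            {_id: 0, host: 'h0:27017', tags: {backup: 'A'}},
            {_id: 1, host: 'h1:27017', tags: {backup: 'B'}},
            {_id: 2, host: 'h2:27017', tags: {backup: 'C'}}
        ],
        // 'backedUp' requires members spanning 2 distinct 'backup' values.
        settings: {getLastErrorModes: {backedUp: {backup: 2}}}
    };
    // Writes then name the mode as their w value, e.g.:
    // db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: 60000}});
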
diff --git a/jstests/replsets/tags_with_reconfig.js b/jstests/replsets/tags_with_reconfig.js
index d942a8e54d9..6f28faf300a 100644
--- a/jstests/replsets/tags_with_reconfig.js
+++ b/jstests/replsets/tags_with_reconfig.js
@@ -4,70 +4,70 @@
// reported their progress to a primary.
(function() {
- "use strict";
+"use strict";
- // Start a replica set with 3 nodes
- var host = getHostName();
- var replTest = new ReplSetTest({nodes: 3});
- var nodes = replTest.startSet();
- var ports = replTest.ports;
+// Start a replica set with 3 nodes
+var host = getHostName();
+var replTest = new ReplSetTest({nodes: 3});
+var nodes = replTest.startSet();
+var ports = replTest.ports;
- // Set tags and getLastErrorModes
- var conf = {
- _id: replTest.name,
- members: [
- {_id: 0, host: host + ":" + ports[0], tags: {"dc": "bbb"}},
- {_id: 1, host: host + ":" + ports[1], tags: {"dc": "bbb"}},
- {_id: 2, host: host + ":" + ports[2], tags: {"dc": "ccc"}}
- ],
- settings: {
- getLastErrorModes: {
- anydc: {dc: 1},
- alldc: {dc: 2},
- }
+// Set tags and getLastErrorModes
+var conf = {
+ _id: replTest.name,
+ members: [
+ {_id: 0, host: host + ":" + ports[0], tags: {"dc": "bbb"}},
+ {_id: 1, host: host + ":" + ports[1], tags: {"dc": "bbb"}},
+ {_id: 2, host: host + ":" + ports[2], tags: {"dc": "ccc"}}
+ ],
+ settings: {
+ getLastErrorModes: {
+ anydc: {dc: 1},
+ alldc: {dc: 2},
}
- };
+ }
+};
- replTest.initiate(conf);
- replTest.awaitReplication();
+replTest.initiate(conf);
+replTest.awaitReplication();
- var wtimeout = ReplSetTest.kDefaultTimeoutMS;
- var master = replTest.getPrimary();
- var db = master.getDB("test");
+var wtimeout = ReplSetTest.kDefaultTimeoutMS;
+var master = replTest.getPrimary();
+var db = master.getDB("test");
- // Insert a document with write concern : anydc
- assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
+// Insert a document with write concern : anydc
+assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
- // Insert a document with write concern : alldc
- assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
+// Insert a document with write concern : alldc
+assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
- // Add a new tag to the replica set
- var config = master.getDB("local").system.replset.findOne();
- printjson(config);
- var modes = config.settings.getLastErrorModes;
- config.version++;
- config.members[0].tags.newtag = "newtag";
+// Add a new tag to the replica set
+var config = master.getDB("local").system.replset.findOne();
+printjson(config);
+var modes = config.settings.getLastErrorModes;
+config.version++;
+config.members[0].tags.newtag = "newtag";
- try {
- master.getDB("admin").runCommand({replSetReconfig: config});
- } catch (e) {
- print(e);
- }
+try {
+ master.getDB("admin").runCommand({replSetReconfig: config});
+} catch (e) {
+ print(e);
+}
- replTest.awaitReplication();
+replTest.awaitReplication();
- // Print the new config for replica set
- var config = master.getDB("local").system.replset.findOne();
- printjson(config);
+// Print the new config for replica set
+var config = master.getDB("local").system.replset.findOne();
+printjson(config);
- master = replTest.getPrimary();
- var db = master.getDB("test");
+master = replTest.getPrimary();
+var db = master.getDB("test");
- // Insert a document with write concern : anydc
- assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
+// Insert a document with write concern : anydc
+assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: wtimeout}}));
- // Insert a document with write concern : alldc
- assert.writeOK(db.foo.insert({x: 4}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
+// Insert a document with write concern : alldc
+assert.writeOK(db.foo.insert({x: 4}, {writeConcern: {w: 'alldc', wtimeout: wtimeout}}));
- replTest.stopSet();
+replTest.stopSet();
}());
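
The reconfig pattern exercised above can be condensed to a short sketch (assuming
an existing primary connection `master`; the try/catch mirrors the test, since a
reconfig may close the connection during a step-down):

    var config = master.getDB("local").system.replset.findOne();
    config.version++;  // every replSetReconfig must bump the config version
    config.members[0].tags.newtag = "newtag";
    try {
        master.getDB("admin").runCommand({replSetReconfig: config});
    } catch (e) {
        print(e);  // tolerated: the node may hang up mid-reconfig
    }
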
diff --git a/jstests/replsets/temp_namespace_restart_as_standalone.js b/jstests/replsets/temp_namespace_restart_as_standalone.js
index 89179d35428..e5061629c82 100644
--- a/jstests/replsets/temp_namespace_restart_as_standalone.js
+++ b/jstests/replsets/temp_namespace_restart_as_standalone.js
@@ -5,100 +5,99 @@
* @tags: [requires_persistence, requires_majority_read_concern, requires_replication]
*/
(function() {
- var rst = new ReplSetTest({nodes: 2});
- rst.startSet();
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet();
- // Rig the election so that the first node becomes the primary and remains primary despite the
- // secondary being terminated during this test.
- var replSetConfig = rst.getReplSetConfig();
- replSetConfig.members[1].priority = 0;
- replSetConfig.members[1].votes = 0;
- rst.initiate(replSetConfig);
+// Rig the election so that the first node becomes the primary and remains primary despite the
+// secondary being terminated during this test.
+var replSetConfig = rst.getReplSetConfig();
+replSetConfig.members[1].priority = 0;
+replSetConfig.members[1].votes = 0;
+rst.initiate(replSetConfig);
- var primaryConn = rst.getPrimary();
- var secondaryConn = rst.getSecondary();
+var primaryConn = rst.getPrimary();
+var secondaryConn = rst.getSecondary();
- var primaryDB = primaryConn.getDB("test");
- var secondaryDB = secondaryConn.getDB("test");
+var primaryDB = primaryConn.getDB("test");
+var secondaryDB = secondaryConn.getDB("test");
- // Create a temporary collection and wait until the operation has replicated to the secondary.
- assert.commandWorked(primaryDB.runCommand({
- applyOps: [{
- op: "c",
- ns: primaryDB.getName() + ".$cmd",
- o: {
- create: "temp_collection",
- temp: true,
- writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}
- }
- }]
- }));
+// Create a temporary collection and wait until the operation has replicated to the secondary.
+assert.commandWorked(primaryDB.runCommand({
+ applyOps: [{
+ op: "c",
+ ns: primaryDB.getName() + ".$cmd",
+ o: {
+ create: "temp_collection",
+ temp: true,
+ writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}
+ }
+ }]
+}));
- rst.awaitReplication();
+rst.awaitReplication();
- // Verify that the temporary collection exists on the primary and has temp=true.
- var primaryCollectionInfos = primaryDB.getCollectionInfos({name: "temp_collection"});
- assert.eq(1, primaryCollectionInfos.length, "'temp_collection' wasn't created on the primary");
- assert.eq("temp_collection",
- primaryCollectionInfos[0].name,
- "'temp_collection' wasn't created on the primary");
- assert.eq(true,
- primaryCollectionInfos[0].options.temp,
- "'temp_collection' wasn't created as temporary on the primary: " +
- tojson(primaryCollectionInfos[0].options));
+// Verify that the temporary collection exists on the primary and has temp=true.
+var primaryCollectionInfos = primaryDB.getCollectionInfos({name: "temp_collection"});
+assert.eq(1, primaryCollectionInfos.length, "'temp_collection' wasn't created on the primary");
+assert.eq("temp_collection",
+ primaryCollectionInfos[0].name,
+ "'temp_collection' wasn't created on the primary");
+assert.eq(true,
+ primaryCollectionInfos[0].options.temp,
+ "'temp_collection' wasn't created as temporary on the primary: " +
+ tojson(primaryCollectionInfos[0].options));
- // Verify that the temporary collection exists on the secondary and has temp=true.
- var secondaryCollectionInfos = secondaryDB.getCollectionInfos({name: "temp_collection"});
- assert.eq(
- 1, secondaryCollectionInfos.length, "'temp_collection' wasn't created on the secondary");
- assert.eq("temp_collection",
- secondaryCollectionInfos[0].name,
- "'temp_collection' wasn't created on the secondary");
- assert.eq(true,
- secondaryCollectionInfos[0].options.temp,
- "'temp_collection' wasn't created as temporary on the secondary: " +
- tojson(secondaryCollectionInfos[0].options));
+// Verify that the temporary collection exists on the secondary and has temp=true.
+var secondaryCollectionInfos = secondaryDB.getCollectionInfos({name: "temp_collection"});
+assert.eq(1, secondaryCollectionInfos.length, "'temp_collection' wasn't created on the secondary");
+assert.eq("temp_collection",
+ secondaryCollectionInfos[0].name,
+ "'temp_collection' wasn't created on the secondary");
+assert.eq(true,
+ secondaryCollectionInfos[0].options.temp,
+ "'temp_collection' wasn't created as temporary on the secondary: " +
+ tojson(secondaryCollectionInfos[0].options));
- // Shut down the secondary and restart it as a stand-alone mongod.
- var secondaryNodeId = rst.getNodeId(secondaryDB.getMongo());
- rst.stop(secondaryNodeId);
+// Shut down the secondary and restart it as a stand-alone mongod.
+var secondaryNodeId = rst.getNodeId(secondaryDB.getMongo());
+rst.stop(secondaryNodeId);
- var storageEngine = jsTest.options().storageEngine || "wiredTiger";
- if (storageEngine === "wiredTiger") {
- secondaryConn = MongoRunner.runMongod({
- dbpath: secondaryConn.dbpath,
- noCleanData: true,
- setParameter: {recoverFromOplogAsStandalone: true}
- });
- } else {
- secondaryConn = MongoRunner.runMongod({dbpath: secondaryConn.dbpath, noCleanData: true});
- }
- assert.neq(null, secondaryConn, "secondary failed to start up as a stand-alone mongod");
- secondaryDB = secondaryConn.getDB("test");
+var storageEngine = jsTest.options().storageEngine || "wiredTiger";
+if (storageEngine === "wiredTiger") {
+ secondaryConn = MongoRunner.runMongod({
+ dbpath: secondaryConn.dbpath,
+ noCleanData: true,
+ setParameter: {recoverFromOplogAsStandalone: true}
+ });
+} else {
+ secondaryConn = MongoRunner.runMongod({dbpath: secondaryConn.dbpath, noCleanData: true});
+}
+assert.neq(null, secondaryConn, "secondary failed to start up as a stand-alone mongod");
+secondaryDB = secondaryConn.getDB("test");
- // Verify that the temporary collection still exists on the secondary and has temp=true.
- secondaryCollectionInfos = secondaryDB.getCollectionInfos({name: "temp_collection"});
- assert.eq(1,
- secondaryCollectionInfos.length,
- "'temp_collection' was dropped after restarting the secondary as a stand-alone");
- assert.eq("temp_collection",
- secondaryCollectionInfos[0].name,
- "'temp_collection' was dropped after restarting the secondary as a stand-alone");
- assert.eq(true,
- secondaryCollectionInfos[0].options.temp,
- "'temp_collection' is no longer temporary after restarting the secondary as a" +
- " stand-alone: " + tojson(secondaryCollectionInfos[0].options));
+// Verify that the temporary collection still exists on the secondary and has temp=true.
+secondaryCollectionInfos = secondaryDB.getCollectionInfos({name: "temp_collection"});
+assert.eq(1,
+ secondaryCollectionInfos.length,
+ "'temp_collection' was dropped after restarting the secondary as a stand-alone");
+assert.eq("temp_collection",
+ secondaryCollectionInfos[0].name,
+ "'temp_collection' was dropped after restarting the secondary as a stand-alone");
+assert.eq(true,
+ secondaryCollectionInfos[0].options.temp,
+ "'temp_collection' is no longer temporary after restarting the secondary as a" +
+ " stand-alone: " + tojson(secondaryCollectionInfos[0].options));
- // Shut down the secondary and restart it as a member of the replica set.
- MongoRunner.stopMongod(secondaryConn);
+// Shut down the secondary and restart it as a member of the replica set.
+MongoRunner.stopMongod(secondaryConn);
- var restart = true;
- rst.start(secondaryNodeId, {}, restart);
+var restart = true;
+rst.start(secondaryNodeId, {}, restart);
- // Verify that writes are replicated to the temporary collection and can successfully be applied
- // by the secondary after having restarted it.
- assert.writeOK(primaryDB.temp_collection.insert(
- {}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
+// Verify that writes are replicated to the temporary collection and can successfully be applied
+// by the secondary after having restarted it.
+assert.writeOK(primaryDB.temp_collection.insert(
+ {}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}}));
- rst.stopSet();
+rst.stopSet();
})();
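
The temp-collection assertions above all follow the same shape; a compact helper
sketch (hypothetical, assuming a live connection `conn`):

    function isTempCollection(conn, dbName, collName) {
        // getCollectionInfos returns an empty array when the collection is absent.
        var infos = conn.getDB(dbName).getCollectionInfos({name: collName});
        return infos.length === 1 && infos[0].options.temp === true;
    }
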
diff --git a/jstests/replsets/test_command.js b/jstests/replsets/test_command.js
index cc4ef0f0d0f..a8228464cef 100644
--- a/jstests/replsets/test_command.js
+++ b/jstests/replsets/test_command.js
@@ -3,143 +3,140 @@
// waitForDrainFinish - waits for primary to finish draining its applier queue.
(function() {
- 'use strict';
- var name = 'test_command';
- var replSet = new ReplSetTest({name: name, nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({
- _id: name,
- members: [
- {_id: 0, host: nodes[0], priority: 3},
- {_id: 1, host: nodes[1]},
- {_id: 2, host: nodes[2], arbiterOnly: true},
- ],
- });
-
- // Stabilize replica set with node 0 as primary.
-
- assert.commandWorked(replSet.nodes[0].adminCommand({
+'use strict';
+var name = 'test_command';
+var replSet = new ReplSetTest({name: name, nodes: 3});
+var nodes = replSet.nodeList();
+replSet.startSet();
+replSet.initiate({
+ _id: name,
+ members: [
+ {_id: 0, host: nodes[0], priority: 3},
+ {_id: 1, host: nodes[1]},
+ {_id: 2, host: nodes[2], arbiterOnly: true},
+ ],
+});
+
+// Stabilize replica set with node 0 as primary.
+
+assert.commandWorked(replSet.nodes[0].adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: 60 * 1000,
+}),
+                     'node 0 ' + replSet.nodes[0].host + ' failed to become primary');
+
+// We need the try/catch to handle that the node may have hung up the connection due
+// to a state change.
+try {
+ assert.commandWorked(replSet.nodes[1].adminCommand({
replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
+ waitForMemberState: ReplSetTest.State.SECONDARY,
timeoutMillis: 60 * 1000,
- }),
- 'node 0' + replSet.nodes[0].host + ' failed to become primary');
-
- // We need the try/catch to handle that the node may have hung up the connection due
- // to a state change.
- try {
- assert.commandWorked(replSet.nodes[1].adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.SECONDARY,
- timeoutMillis: 60 * 1000,
- }));
- } catch (e) {
- jsTestLog(e);
- assert.commandWorked(replSet.nodes[1].adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.SECONDARY,
- timeoutMillis: 60 * 1000,
- }),
- 'node 1' + replSet.nodes[1].host + ' failed to become secondary');
- }
-
- var primary = replSet.getPrimary();
- var secondary = replSet.getSecondary();
-
- // Check replication mode.
-
- assert.commandFailedWithCode(primary.getDB(name).runCommand({
- replSetTest: 1,
- }),
- ErrorCodes.Unauthorized,
- 'replSetTest should fail against non-admin database');
-
- assert.commandWorked(primary.adminCommand({
+ }));
+} catch (e) {
+ jsTestLog(e);
+ assert.commandWorked(replSet.nodes[1].adminCommand({
replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.SECONDARY,
+ timeoutMillis: 60 * 1000,
}),
- 'failed to check replication mode');
-
- // waitForMemberState tests.
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForMemberState: 'what state',
- timeoutMillis: 1000,
- }),
- ErrorCodes.TypeMismatch,
- 'replSetTest waitForMemberState should fail on non-numerical state');
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
- timeoutMillis: "what timeout",
- }),
- ErrorCodes.TypeMismatch,
- 'replSetTest waitForMemberState should fail on non-numerical timeout');
-
- assert.commandFailedWithCode(primary.adminCommand({
+                         'node 1 ' + replSet.nodes[1].host + ' failed to become secondary');
+}
+
+var primary = replSet.getPrimary();
+var secondary = replSet.getSecondary();
+
+// Check replication mode.
+
+assert.commandFailedWithCode(primary.getDB(name).runCommand({
+ replSetTest: 1,
+}),
+ ErrorCodes.Unauthorized,
+ 'replSetTest should fail against non-admin database');
+
+assert.commandWorked(primary.adminCommand({
+ replSetTest: 1,
+}),
+ 'failed to check replication mode');
+
+// waitForMemberState tests.
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: 'what state',
+ timeoutMillis: 1000,
+}),
+ ErrorCodes.TypeMismatch,
+ 'replSetTest waitForMemberState should fail on non-numerical state');
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: "what timeout",
+}),
+ ErrorCodes.TypeMismatch,
+ 'replSetTest waitForMemberState should fail on non-numerical timeout');
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: 9999,
+ timeoutMillis: 1000,
+}),
+ ErrorCodes.BadValue,
+ 'replSetTest waitForMemberState should fail on invalid state');
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: -1000,
+}),
+ ErrorCodes.BadValue,
+ 'replSetTest waitForMemberState should fail on negative timeout');
+
+assert.commandFailedWithCode(
+ primary.adminCommand({
replSetTest: 1,
- waitForMemberState: 9999,
+ waitForMemberState: ReplSetTest.State.SECONDARY,
timeoutMillis: 1000,
}),
- ErrorCodes.BadValue,
- 'replSetTest waitForMemberState should fail on invalid state');
-
- assert.commandFailedWithCode(primary.adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
- timeoutMillis: -1000,
- }),
- ErrorCodes.BadValue,
- 'replSetTest waitForMemberState should fail on negative timeout');
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.SECONDARY,
- timeoutMillis: 1000,
- }),
- ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForMemberState(SECONDARY) should time out on node 0 ' + primary.host);
-
- assert.commandWorked(
- secondary.adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.SECONDARY,
- timeoutMillis: 1000,
- }),
- 'replSetTest waitForMemberState(SECONDARY) failed on node 1 ' + secondary.host);
-
- // waitForDrainFinish tests.
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 'what state',
- }),
- ErrorCodes.TypeMismatch,
- 'replSetTest waitForDrainFinish should fail on non-numerical timeout');
-
- assert.commandFailedWithCode(primary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: -1000,
- }),
- ErrorCodes.BadValue,
- 'replSetTest waitForDrainFinish should fail on negative timeout');
+ ErrorCodes.ExceededTimeLimit,
+ 'replSetTest waitForMemberState(SECONDARY) should time out on node 0 ' + primary.host);
- assert.commandWorked(primary.adminCommand({
+assert.commandWorked(
+ secondary.adminCommand({
replSetTest: 1,
- waitForDrainFinish: 1000,
- }),
- 'node 0' + primary.host + ' failed to wait for drain to finish');
-
- assert.commandWorked(secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 0,
+ waitForMemberState: ReplSetTest.State.SECONDARY,
+ timeoutMillis: 1000,
}),
- 'node 1' + primary.host + ' failed to wait for drain to finish');
- replSet.stopSet();
+ 'replSetTest waitForMemberState(SECONDARY) failed on node 1 ' + secondary.host);
+
+// waitForDrainFinish tests.
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 'what state',
+}),
+ ErrorCodes.TypeMismatch,
+ 'replSetTest waitForDrainFinish should fail on non-numerical timeout');
+
+assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: -1000,
+}),
+ ErrorCodes.BadValue,
+ 'replSetTest waitForDrainFinish should fail on negative timeout');
+
+assert.commandWorked(primary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 1000,
+}),
+                     'node 0 ' + primary.host + ' failed to wait for drain to finish');
+
+assert.commandWorked(secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 0,
+}),
+                     'node 1 ' + secondary.host + ' failed to wait for drain to finish');
+replSet.stopSet();
})();
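
For reference, the replSetTest command shape validated above (values are examples;
waitForMemberState must be a numeric member state and timeoutMillis non-negative):

    // Wait up to 60 seconds for this node to report SECONDARY state.
    assert.commandWorked(conn.adminCommand({
        replSetTest: 1,
        waitForMemberState: ReplSetTest.State.SECONDARY,
        timeoutMillis: 60 * 1000,
    }));
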
diff --git a/jstests/replsets/too_stale_secondary.js b/jstests/replsets/too_stale_secondary.js
index d8d166fce83..b4235683667 100644
--- a/jstests/replsets/too_stale_secondary.js
+++ b/jstests/replsets/too_stale_secondary.js
@@ -29,124 +29,120 @@
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/replsets/rslib.js');
+load('jstests/replsets/rslib.js');
- function getFirstOplogEntry(conn) {
- return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0];
- }
+function getFirstOplogEntry(conn) {
+ return conn.getDB('local').oplog.rs.find().sort({$natural: 1}).limit(1)[0];
+}
- /**
- * Overflows the oplog of a given node.
- *
- * To detect oplog overflow, we continuously insert large documents until we
- * detect that the first entry of the oplog is no longer the same as when we started. This
- * implies that the oplog attempted to grow beyond its maximum size i.e. it
- * has overflowed/rolled over.
- *
- * Each document will be inserted with a writeConcern given by 'writeConcern'.
- *
- */
- function overflowOplog(conn, db, writeConcern) {
- var firstOplogEntry = getFirstOplogEntry(primary);
- var collName = "overflow";
-
- // Keep inserting large documents until the oplog rolls over.
- const largeStr = new Array(32 * 1024).join('aaaaaaaa');
- while (bsonWoCompare(getFirstOplogEntry(conn), firstOplogEntry) === 0) {
- assert.writeOK(
- db[collName].insert({data: largeStr}, {writeConcern: {w: writeConcern}}));
- }
+/**
+ * Overflows the oplog of a given node.
+ *
+ * To detect oplog overflow, we continuously insert large documents until we
+ * detect that the first entry of the oplog is no longer the same as when we started. This
+ * implies that the oplog attempted to grow beyond its maximum size i.e. it
+ * has overflowed/rolled over.
+ *
+ * Each document will be inserted with a writeConcern given by 'writeConcern'.
+ *
+ */
+function overflowOplog(conn, db, writeConcern) {
+    var firstOplogEntry = getFirstOplogEntry(conn);
+ var collName = "overflow";
+
+ // Keep inserting large documents until the oplog rolls over.
+ const largeStr = new Array(32 * 1024).join('aaaaaaaa');
+ while (bsonWoCompare(getFirstOplogEntry(conn), firstOplogEntry) === 0) {
+ assert.writeOK(db[collName].insert({data: largeStr}, {writeConcern: {w: writeConcern}}));
}
+}
- /**
- * True if a node's entry in "members" has tooStale: true.
- */
- function tooStale(conn) {
- return assert.commandWorked(conn.adminCommand("replSetGetStatus")).tooStale;
- }
+/**
+ * True if a node's entry in "members" has tooStale: true.
+ */
+function tooStale(conn) {
+ return assert.commandWorked(conn.adminCommand("replSetGetStatus")).tooStale;
+}
- var testName = "too_stale_secondary";
+var testName = "too_stale_secondary";
- var smallOplogSizeMB = 1;
- var bigOplogSizeMB = 1000;
+var smallOplogSizeMB = 1;
+var bigOplogSizeMB = 1000;
- // Node 0 is given a small oplog so we can overflow it. Node 1's large oplog allows it to
- // store all entries comfortably without overflowing, so that Node 2 can eventually use it as
- // a sync source after it goes too stale. Because this test overflows the oplog, a small
- // syncdelay is chosen to frequently take checkpoints, allowing oplog truncation to proceed.
- var replTest = new ReplSetTest({
- name: testName,
- nodes: [
- {oplogSize: smallOplogSizeMB},
- {oplogSize: bigOplogSizeMB},
- {oplogSize: smallOplogSizeMB}
- ],
- nodeOptions: {syncdelay: 1},
- });
+// Node 0 is given a small oplog so we can overflow it. Node 1's large oplog allows it to
+// store all entries comfortably without overflowing, so that Node 2 can eventually use it as
+// a sync source after it goes too stale. Because this test overflows the oplog, a small
+// syncdelay is chosen to frequently take checkpoints, allowing oplog truncation to proceed.
+var replTest = new ReplSetTest({
+ name: testName,
+ nodes:
+ [{oplogSize: smallOplogSizeMB}, {oplogSize: bigOplogSizeMB}, {oplogSize: smallOplogSizeMB}],
+ nodeOptions: {syncdelay: 1},
+});
- var nodes = replTest.startSet();
- replTest.initiate({
- _id: testName,
- members: [
- {_id: 0, host: nodes[0].host},
- {_id: 1, host: nodes[1].host, priority: 0},
- {_id: 2, host: nodes[2].host, priority: 0}
- ]
- });
+var nodes = replTest.startSet();
+replTest.initiate({
+ _id: testName,
+ members: [
+ {_id: 0, host: nodes[0].host},
+ {_id: 1, host: nodes[1].host, priority: 0},
+ {_id: 2, host: nodes[2].host, priority: 0}
+ ]
+});
- var dbName = testName;
- var collName = "test";
+var dbName = testName;
+var collName = "test";
- jsTestLog("Wait for Node 0 to become the primary.");
- replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
+jsTestLog("Wait for Node 0 to become the primary.");
+replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY);
- var primary = replTest.getPrimary();
- var primaryTestDB = primary.getDB(dbName);
+var primary = replTest.getPrimary();
+var primaryTestDB = primary.getDB(dbName);
- jsTestLog("1: Insert one document on the primary (Node 0) and ensure it is replicated.");
- assert.writeOK(primaryTestDB[collName].insert({a: 1}, {writeConcern: {w: 3}}));
- assert(!tooStale(replTest.nodes[2]));
+jsTestLog("1: Insert one document on the primary (Node 0) and ensure it is replicated.");
+assert.writeOK(primaryTestDB[collName].insert({a: 1}, {writeConcern: {w: 3}}));
+assert(!tooStale(replTest.nodes[2]));
- jsTestLog("2: Stop Node 2.");
- replTest.stop(2);
+jsTestLog("2: Stop Node 2.");
+replTest.stop(2);
- jsTestLog("3: Wait until Node 2 is down.");
- replTest.waitForState(replTest.nodes[2], ReplSetTest.State.DOWN);
+jsTestLog("3: Wait until Node 2 is down.");
+replTest.waitForState(replTest.nodes[2], ReplSetTest.State.DOWN);
- var firstOplogEntryNode1 = getFirstOplogEntry(replTest.nodes[1]);
+var firstOplogEntryNode1 = getFirstOplogEntry(replTest.nodes[1]);
- jsTestLog("4: Overflow the primary's oplog.");
- overflowOplog(primary, primaryTestDB, 2);
+jsTestLog("4: Overflow the primary's oplog.");
+overflowOplog(primary, primaryTestDB, 2);
- // Make sure that Node 1's oplog didn't overflow.
- assert.eq(firstOplogEntryNode1,
- getFirstOplogEntry(replTest.nodes[1]),
- "Node 1's oplog overflowed unexpectedly.");
+// Make sure that Node 1's oplog didn't overflow.
+assert.eq(firstOplogEntryNode1,
+ getFirstOplogEntry(replTest.nodes[1]),
+ "Node 1's oplog overflowed unexpectedly.");
- jsTestLog("5: Stop Node 1 and restart Node 2.");
- replTest.stop(1);
- replTest.restart(2);
+jsTestLog("5: Stop Node 1 and restart Node 2.");
+replTest.stop(1);
+replTest.restart(2);
- jsTestLog("6: Wait for Node 2 to transition to RECOVERING (it should be too stale).");
- replTest.waitForState(replTest.nodes[2], ReplSetTest.State.RECOVERING);
- assert(tooStale(replTest.nodes[2]));
+jsTestLog("6: Wait for Node 2 to transition to RECOVERING (it should be too stale).");
+replTest.waitForState(replTest.nodes[2], ReplSetTest.State.RECOVERING);
+assert(tooStale(replTest.nodes[2]));
- jsTestLog("7: Stop and restart Node 2.");
- replTest.stop(2);
- replTest.restart(2);
+jsTestLog("7: Stop and restart Node 2.");
+replTest.stop(2);
+replTest.restart(2);
- jsTestLog(
- "8: Wait for Node 2 to transition to RECOVERING (its oplog should remain stale after restart)");
- replTest.waitForState(replTest.nodes[2], ReplSetTest.State.RECOVERING);
+jsTestLog(
+ "8: Wait for Node 2 to transition to RECOVERING (its oplog should remain stale after restart)");
+replTest.waitForState(replTest.nodes[2], ReplSetTest.State.RECOVERING);
- jsTestLog("9: Restart Node 1, which should have the full oplog history.");
- replTest.restart(1);
+jsTestLog("9: Restart Node 1, which should have the full oplog history.");
+replTest.restart(1);
- jsTestLog("10: Wait for Node 2 to leave RECOVERING and transition to SECONDARY.");
- replTest.waitForState(replTest.nodes[2], ReplSetTest.State.SECONDARY);
- assert(!tooStale(replTest.nodes[2]));
+jsTestLog("10: Wait for Node 2 to leave RECOVERING and transition to SECONDARY.");
+replTest.waitForState(replTest.nodes[2], ReplSetTest.State.SECONDARY);
+assert(!tooStale(replTest.nodes[2]));
- replTest.stopSet();
+replTest.stopSet();
}());
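
The rollover detection used above generalizes to a small sketch (assuming a
connection `conn` and a database handle `db`; the document size is illustrative):

    function getFirstOplogEntry(conn) {
        return conn.getDB("local").oplog.rs.find().sort({$natural: 1}).limit(1)[0];
    }
    // Insert large documents until the oldest oplog entry changes, which means
    // the capped oplog truncated its head, i.e. it rolled over.
    var first = getFirstOplogEntry(conn);
    var largeStr = new Array(32 * 1024).join('aaaaaaaa');  // roughly 256 KB
    while (bsonWoCompare(getFirstOplogEntry(conn), first) === 0) {
        assert.writeOK(db.overflow.insert({data: largeStr}));
    }
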
diff --git a/jstests/replsets/transaction_table_multi_statement_txn.js b/jstests/replsets/transaction_table_multi_statement_txn.js
index 37579e35aba..01fc3a577d5 100644
--- a/jstests/replsets/transaction_table_multi_statement_txn.js
+++ b/jstests/replsets/transaction_table_multi_statement_txn.js
@@ -5,46 +5,45 @@
* @tags: [uses_transactions]
*/
(function() {
- 'use strict';
+'use strict';
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}]});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}]});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
- const session = primary.startSession();
- const primaryDB = session.getDatabase('test');
- const coll = primaryDB.getCollection('coll');
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+const session = primary.startSession();
+const primaryDB = session.getDatabase('test');
+const coll = primaryDB.getCollection('coll');
- jsTestLog('Creating collection ' + coll.getFullName());
- assert.commandWorked(
- primaryDB.createCollection(coll.getName(), {writeConcern: {w: "majority"}}));
- replTest.awaitReplication();
+jsTestLog('Creating collection ' + coll.getFullName());
+assert.commandWorked(primaryDB.createCollection(coll.getName(), {writeConcern: {w: "majority"}}));
+replTest.awaitReplication();
- const sessionId = session.getSessionId();
- jsTestLog('Starting transaction on session ' + sessionId);
- session.startTransaction();
- assert.writeOK(coll.insert({_id: 0}));
- assert.writeOK(coll.insert({_id: 1}));
- assert.commandWorked(session.commitTransaction_forTesting());
- const opTime = session.getOperationTime();
- const txnNum = session.getTxnNumber_forTesting();
- jsTestLog('Successfully committed transaction at operation time ' + tojson(opTime) +
- 'with transaction number ' + txnNum);
+const sessionId = session.getSessionId();
+jsTestLog('Starting transaction on session ' + sessionId);
+session.startTransaction();
+assert.writeOK(coll.insert({_id: 0}));
+assert.writeOK(coll.insert({_id: 1}));
+assert.commandWorked(session.commitTransaction_forTesting());
+const opTime = session.getOperationTime();
+const txnNum = session.getTxnNumber_forTesting();
+jsTestLog('Successfully committed transaction at operation time ' + tojson(opTime) +
+          ' with transaction number ' + txnNum);
- // After replication, assert the secondary's transaction table has been updated.
- replTest.awaitReplication();
- jsTestLog('Checking transaction tables on both primary and secondary.');
- jsTestLog('Primary ' + primary.host + ': ' +
- tojson(primary.getDB('config').transactions.find().toArray()));
- jsTestLog('Secondary ' + secondary.host + ': ' +
- tojson(secondary.getDB('config').transactions.find().toArray()));
- RetryableWritesUtil.checkTransactionTable(primary, sessionId, txnNum, opTime);
- RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, sessionId);
+// After replication, assert the secondary's transaction table has been updated.
+replTest.awaitReplication();
+jsTestLog('Checking transaction tables on both primary and secondary.');
+jsTestLog('Primary ' + primary.host + ': ' +
+ tojson(primary.getDB('config').transactions.find().toArray()));
+jsTestLog('Secondary ' + secondary.host + ': ' +
+ tojson(secondary.getDB('config').transactions.find().toArray()));
+RetryableWritesUtil.checkTransactionTable(primary, sessionId, txnNum, opTime);
+RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, sessionId);
- session.endSession();
- replTest.stopSet();
+session.endSession();
+replTest.stopSet();
})();
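
The transaction table consulted above can also be inspected by hand; a sketch with
hypothetical `primary` and `sessionId` variables:

    // Each logical session owns one document in config.transactions, keyed by
    // the session id; it records the latest txnNum and lastWriteOpTime.
    var record = primary.getDB("config").transactions.findOne({"_id.id": sessionId.id});
    printjson(record);
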
diff --git a/jstests/replsets/transaction_table_oplog_replay.js b/jstests/replsets/transaction_table_oplog_replay.js
index b40ab630a49..eb155343127 100644
--- a/jstests/replsets/transaction_table_oplog_replay.js
+++ b/jstests/replsets/transaction_table_oplog_replay.js
@@ -2,201 +2,204 @@
* Tests that the transaction table is properly updated on secondaries through oplog replay.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/retryable_writes_util.js");
+load("jstests/libs/retryable_writes_util.js");
- if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
- jsTestLog("Retryable writes are not supported, skipping test");
- return;
- }
+if (!RetryableWritesUtil.storageEngineSupportsRetryableWrites(jsTest.options().storageEngine)) {
+ jsTestLog("Retryable writes are not supported, skipping test");
+ return;
+}
- /**
- * Runs each command on the primary, awaits replication then asserts the secondary's transaction
- * collection has been updated to store the latest txnNumber and lastWriteOpTimeTs for each
- * sessionId.
- */
- function runCommandsWithDifferentIds(primary, secondary, cmds) {
- // Disable oplog application to ensure the oplog entries come in the same batch.
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
-
- let responseTimestamps = [];
- cmds.forEach(function(cmd) {
- let res = assert.commandWorked(primary.getDB("test").runCommand(cmd));
- let opTime = (res.opTime.ts ? res.opTime.ts : res.opTime);
-
- RetryableWritesUtil.checkTransactionTable(primary, cmd.lsid, cmd.txnNumber, opTime);
- responseTimestamps.push(opTime);
- });
-
- // After replication, assert the secondary's transaction table has been updated.
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
- replTest.awaitReplication();
- cmds.forEach(function(cmd, i) {
- RetryableWritesUtil.checkTransactionTable(
- secondary, cmd.lsid, cmd.txnNumber, responseTimestamps[i]);
- });
-
- // Both nodes should have the same transaction collection record for each sessionId.
- cmds.forEach(function(cmd) {
- RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, cmd.lsid);
- });
- }
+/**
+ * Runs each command on the primary, awaits replication then asserts the secondary's transaction
+ * collection has been updated to store the latest txnNumber and lastWriteOpTimeTs for each
+ * sessionId.
+ */
+function runCommandsWithDifferentIds(primary, secondary, cmds) {
+ // Disable oplog application to ensure the oplog entries come in the same batch.
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
- /**
- * Runs each command on the primary and tracks the highest txnNumber and lastWriteOpTimeTs, then
- * asserts the secondary's transaction collection document for the sessionId has been updated
- * correctly.
- */
- function runCommandsWithSameId(primary, secondary, cmds) {
- // Disable oplog application to ensure the oplog entries come in the same batch.
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
-
- let latestOpTimeTs = Timestamp();
- let highestTxnNumber = NumberLong(-1);
- cmds.forEach(function(cmd) {
- let res = assert.commandWorked(primary.getDB("test").runCommand(cmd));
- let opTime = (res.opTime.ts ? res.opTime.ts : res.opTime);
-
- RetryableWritesUtil.checkTransactionTable(primary, cmd.lsid, cmd.txnNumber, opTime);
- latestOpTimeTs = opTime;
- highestTxnNumber =
- (cmd.txnNumber > highestTxnNumber ? cmd.txnNumber : highestTxnNumber);
- });
-
- // After replication, assert the secondary's transaction table has been updated to store the
- // highest transaction number and the latest write optime.
- secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
- replTest.awaitReplication();
+ let responseTimestamps = [];
+ cmds.forEach(function(cmd) {
+ let res = assert.commandWorked(primary.getDB("test").runCommand(cmd));
+ let opTime = (res.opTime.ts ? res.opTime.ts : res.opTime);
+
+ RetryableWritesUtil.checkTransactionTable(primary, cmd.lsid, cmd.txnNumber, opTime);
+ responseTimestamps.push(opTime);
+ });
+
+ // After replication, assert the secondary's transaction table has been updated.
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
+ replTest.awaitReplication();
+ cmds.forEach(function(cmd, i) {
RetryableWritesUtil.checkTransactionTable(
- secondary, cmds[0].lsid, highestTxnNumber, latestOpTimeTs);
+ secondary, cmd.lsid, cmd.txnNumber, responseTimestamps[i]);
+ });
- // Both nodes should have the same transaction collection record for the sessionId.
- RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, cmds[0].lsid);
+ // Both nodes should have the same transaction collection record for each sessionId.
+ cmds.forEach(function(cmd) {
+ RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, cmd.lsid);
+ });
+}
+
+/**
+ * Runs each command on the primary and tracks the highest txnNumber and lastWriteOpTimeTs, then
+ * asserts the secondary's transaction collection document for the sessionId has been updated
+ * correctly.
+ */
+function runCommandsWithSameId(primary, secondary, cmds) {
+ // Disable oplog application to ensure the oplog entries come in the same batch.
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"});
+
+ let latestOpTimeTs = Timestamp();
+ let highestTxnNumber = NumberLong(-1);
+ cmds.forEach(function(cmd) {
+ let res = assert.commandWorked(primary.getDB("test").runCommand(cmd));
+ let opTime = (res.opTime.ts ? res.opTime.ts : res.opTime);
+
+ RetryableWritesUtil.checkTransactionTable(primary, cmd.lsid, cmd.txnNumber, opTime);
+ latestOpTimeTs = opTime;
+ highestTxnNumber = (cmd.txnNumber > highestTxnNumber ? cmd.txnNumber : highestTxnNumber);
+ });
+
+ // After replication, assert the secondary's transaction table has been updated to store the
+ // highest transaction number and the latest write optime.
+ secondary.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"});
+ replTest.awaitReplication();
+ RetryableWritesUtil.checkTransactionTable(
+ secondary, cmds[0].lsid, highestTxnNumber, latestOpTimeTs);
+
+ // Both nodes should have the same transaction collection record for the sessionId.
+ RetryableWritesUtil.assertSameRecordOnBothConnections(primary, secondary, cmds[0].lsid);
+}
+
+const replTest = new ReplSetTest({nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+let primary = replTest.getPrimary();
+let secondary = replTest.getSecondary();
+
+////////////////////////////////////////////////////////////////////////
+// Test insert command
+
+let insertCmds = [
+ {
+ insert: "foo",
+ documents: [{_id: 10}, {_id: 20}, {_id: 30}, {_id: 40}],
+ ordered: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(5)
+ },
+ {
+ insert: "bar",
+ documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(10)
}
+];
+runCommandsWithDifferentIds(primary, secondary, insertCmds);
- const replTest = new ReplSetTest({nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- let primary = replTest.getPrimary();
- let secondary = replTest.getSecondary();
-
- ////////////////////////////////////////////////////////////////////////
- // Test insert command
-
- let insertCmds = [
- {
- insert: "foo",
- documents: [{_id: 10}, {_id: 20}, {_id: 30}, {_id: 40}],
- ordered: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(5)
- },
- {
- insert: "bar",
- documents: [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(10)
- }
- ];
- runCommandsWithDifferentIds(primary, secondary, insertCmds);
-
- let lsid = {id: UUID()};
- insertCmds = insertCmds.map(function(cmd) {
- cmd.documents.forEach(function(doc) {
- doc._id = doc._id + 100;
- });
- cmd.lsid = lsid;
- cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
- return cmd;
+let lsid = {id: UUID()};
+insertCmds = insertCmds.map(function(cmd) {
+ cmd.documents.forEach(function(doc) {
+ doc._id = doc._id + 100;
});
- runCommandsWithSameId(primary, secondary, insertCmds);
-
- ////////////////////////////////////////////////////////////////////////
- // Test update command
-
- let updateCommands = [
- {
- update: "foo",
- updates: [
- {q: {_id: 10}, u: {$set: {x: 10}}, upsert: false},
- {q: {_id: 20}, u: {$set: {x: 20}}, upsert: false},
- {q: {_id: 30}, u: {$set: {x: 30}}, upsert: false},
- {q: {_id: 40}, u: {$set: {x: 40}}, upsert: false}
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(5)
- },
- {
- update: "bar",
- updates: [
- {q: {_id: 1}, u: {$set: {x: 10}}, upsert: true},
- {q: {_id: 2}, u: {$set: {x: 20}}, upsert: true},
- {q: {_id: 3}, u: {$set: {x: 30}}, upsert: true},
- {q: {_id: 4}, u: {$set: {x: 40}}, upsert: true}
- ],
- ordered: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(10)
- }
- ];
- runCommandsWithDifferentIds(primary, secondary, updateCommands);
-
- lsid = {id: UUID()};
- updateCommands = updateCommands.map(function(cmd) {
- cmd.updates.forEach(function(up) {
- up.q._id = up.q._id + 100;
- });
- cmd.lsid = lsid;
- cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
- return cmd;
+ cmd.lsid = lsid;
+ cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
+ return cmd;
+});
+runCommandsWithSameId(primary, secondary, insertCmds);
+
+////////////////////////////////////////////////////////////////////////
+// Test update command
+
+let updateCommands = [
+ {
+ update: "foo",
+ updates: [
+ {q: {_id: 10}, u: {$set: {x: 10}}, upsert: false},
+ {q: {_id: 20}, u: {$set: {x: 20}}, upsert: false},
+ {q: {_id: 30}, u: {$set: {x: 30}}, upsert: false},
+ {q: {_id: 40}, u: {$set: {x: 40}}, upsert: false}
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(5)
+ },
+ {
+ update: "bar",
+ updates: [
+ {q: {_id: 1}, u: {$set: {x: 10}}, upsert: true},
+ {q: {_id: 2}, u: {$set: {x: 20}}, upsert: true},
+ {q: {_id: 3}, u: {$set: {x: 30}}, upsert: true},
+ {q: {_id: 4}, u: {$set: {x: 40}}, upsert: true}
+ ],
+ ordered: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(10)
+ }
+];
+runCommandsWithDifferentIds(primary, secondary, updateCommands);
+
+lsid = {
+ id: UUID()
+};
+updateCommands = updateCommands.map(function(cmd) {
+ cmd.updates.forEach(function(up) {
+ up.q._id = up.q._id + 100;
});
- runCommandsWithSameId(primary, secondary, updateCommands);
-
- ////////////////////////////////////////////////////////////////////////
- // Test delete command
-
- let deleteCommands = [
- {
- delete: "foo",
- deletes: [
- {q: {_id: 10}, limit: 1},
- {q: {_id: 20}, limit: 1},
- {q: {_id: 30}, limit: 1},
- {q: {_id: 40}, limit: 1}
- ],
- ordered: true,
- lsid: {id: UUID()},
- txnNumber: NumberLong(5)
- },
- {
- delete: "bar",
- deletes: [
- {q: {_id: 1}, limit: 1},
- {q: {_id: 2}, limit: 1},
- {q: {_id: 3}, limit: 1},
- {q: {_id: 4}, limit: 1}
- ],
- ordered: false,
- lsid: {id: UUID()},
- txnNumber: NumberLong(10)
- }
- ];
- runCommandsWithDifferentIds(primary, secondary, deleteCommands);
-
- lsid = {id: UUID()};
- deleteCommands = deleteCommands.map(function(cmd) {
- cmd.deletes.forEach(function(d) {
- d.q._id = d.q._id + 100;
- });
- cmd.lsid = lsid;
- cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
- return cmd;
+ cmd.lsid = lsid;
+ cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
+ return cmd;
+});
+runCommandsWithSameId(primary, secondary, updateCommands);
+
+////////////////////////////////////////////////////////////////////////
+// Test delete command
+
+let deleteCommands = [
+ {
+ delete: "foo",
+ deletes: [
+ {q: {_id: 10}, limit: 1},
+ {q: {_id: 20}, limit: 1},
+ {q: {_id: 30}, limit: 1},
+ {q: {_id: 40}, limit: 1}
+ ],
+ ordered: true,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(5)
+ },
+ {
+ delete: "bar",
+ deletes: [
+ {q: {_id: 1}, limit: 1},
+ {q: {_id: 2}, limit: 1},
+ {q: {_id: 3}, limit: 1},
+ {q: {_id: 4}, limit: 1}
+ ],
+ ordered: false,
+ lsid: {id: UUID()},
+ txnNumber: NumberLong(10)
+ }
+];
+runCommandsWithDifferentIds(primary, secondary, deleteCommands);
+
+lsid = {
+ id: UUID()
+};
+deleteCommands = deleteCommands.map(function(cmd) {
+ cmd.deletes.forEach(function(d) {
+ d.q._id = d.q._id + 100;
});
- runCommandsWithSameId(primary, secondary, deleteCommands);
+ cmd.lsid = lsid;
+ cmd.txnNumber = NumberLong(cmd.txnNumber + 100);
+ return cmd;
+});
+runCommandsWithSameId(primary, secondary, deleteCommands);
- replTest.stopSet();
+replTest.stopSet();
})();
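
The retryable-write command shape replayed above, as a minimal sketch (example
values; lsid and txnNumber are what tie the write to the transaction table):

    var cmd = {
        insert: "foo",
        documents: [{_id: 1}],
        ordered: true,
        lsid: {id: UUID()},        // logical session id
        txnNumber: NumberLong(5),  // must increase monotonically per session
    };
    assert.commandWorked(primary.getDB("test").runCommand(cmd));
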
diff --git a/jstests/replsets/transactions_after_rollback_via_refetch.js b/jstests/replsets/transactions_after_rollback_via_refetch.js
index cb9ea1c3257..463d70f4489 100644
--- a/jstests/replsets/transactions_after_rollback_via_refetch.js
+++ b/jstests/replsets/transactions_after_rollback_via_refetch.js
@@ -7,115 +7,116 @@
* @tags: [uses_transactions]
*/
(function() {
- 'use strict';
-
- load("jstests/replsets/libs/rollback_test.js");
-
- let name = "transactions_after_rollback_via_refetch";
- let dbName = name;
- let crudCollName = "crudColl";
- let collToDropName = "collToDrop";
-
- let CommonOps = (node) => {
- // Insert a couple of documents that will initially be present on all nodes.
- let crudColl = node.getDB(dbName)[crudCollName];
- assert.commandWorked(crudColl.insert({_id: 0}));
- assert.commandWorked(crudColl.insert({_id: 1}));
-
- // Create a collection so it can be dropped on the rollback node.
- node.getDB(dbName)[collToDropName].insert({_id: 0});
- };
-
- // We want to have the rollback node perform some inserts, updates, and deletes locally
- // during the rollback process, so we can ensure that transactions will read correct data
- // post-rollback, even though these writes will be un-timestamped.
- let RollbackOps = (node) => {
- let crudColl = node.getDB(dbName)[crudCollName];
- // Roll back an update (causes refetch and local update).
- assert.commandWorked(crudColl.update({_id: 0}, {$set: {rollbackNode: 0}}));
- // Roll back a delete (causes refetch and local insert).
- assert.commandWorked(crudColl.remove({_id: 1}));
- // Roll back an insert (causes local delete).
- assert.commandWorked(crudColl.insert({_id: 2}));
-
- // Roll back a drop (re-creates the collection).
- node.getDB(dbName)[collToDropName].drop();
- };
-
- let SyncSourceOps = (node) => {
- let coll = node.getDB(dbName)[crudCollName];
- // Update these docs so the rollback node will refetch them.
- assert.commandWorked(coll.update({_id: 0}, {$set: {syncSource: 0}}));
- assert.commandWorked(coll.update({_id: 1}, {$set: {syncSource: 1}}));
- };
-
- // Set up a replica set for use in RollbackTest. We disable majority reads on all nodes so that
- // they will use the "rollbackViaRefetch" algorithm.
- let replTest = new ReplSetTest({
- name,
- nodes: 3,
- useBridge: true,
- settings: {chainingAllowed: false},
- nodeOptions: {enableMajorityReadConcern: "false"}
- });
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- replTest.initiate(config);
-
- let rollbackTest = new RollbackTest(name, replTest);
-
- CommonOps(rollbackTest.getPrimary());
-
- let rollbackNode = rollbackTest.transitionToRollbackOperations();
- RollbackOps(rollbackNode);
-
- let syncSourceNode = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
- SyncSourceOps(syncSourceNode);
-
- // Wait for rollback to finish.
- rollbackTest.transitionToSyncSourceOperationsDuringRollback();
- rollbackTest.transitionToSteadyStateOperations();
-
- // Make the rollback node primary so we can run transactions against it.
- rollbackTest.getTestFixture().stepUp(rollbackNode);
-
- jsTestLog("Testing transactions against the node that just rolled back.");
- const sessionOptions = {causalConsistency: false};
- let session = rollbackNode.getDB(dbName).getMongo().startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
- let sessionColl = sessionDb[crudCollName];
-
- // Make sure we can do basic CRUD ops inside a transaction and read the data back correctly, pre
- // and post-commit.
- session.startTransaction();
- // Make sure we read from the snapshot correctly.
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
- [{_id: 0, syncSource: 0}, {_id: 1, syncSource: 1}]);
- // Do some basic ops.
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {inTxn: 1}}));
- assert.commandWorked(sessionColl.remove({_id: 1}));
- assert.commandWorked(sessionColl.insert({_id: 2}));
- // Make sure we read the updated data correctly.
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
- [{_id: 0, syncSource: 0, inTxn: 1}, {_id: 2}]);
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Make sure data is visible after commit.
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
- [{_id: 0, syncSource: 0, inTxn: 1}, {_id: 2}]);
-
- // Run a transaction that touches the collection that was re-created during rollback.
- sessionColl = sessionDb[collToDropName];
- session.startTransaction();
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 0}]);
- assert.commandWorked(sessionColl.update({_id: 0}, {$set: {inTxn: 1}}));
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Make sure data is visible after commit.
- assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 0, inTxn: 1}]);
-
- // Check the replica set.
- rollbackTest.stop();
-
+'use strict';
+
+load("jstests/replsets/libs/rollback_test.js");
+
+let name = "transactions_after_rollback_via_refetch";
+let dbName = name;
+let crudCollName = "crudColl";
+let collToDropName = "collToDrop";
+
+let CommonOps = (node) => {
+ // Insert a couple of documents that will initially be present on all nodes.
+ let crudColl = node.getDB(dbName)[crudCollName];
+ assert.commandWorked(crudColl.insert({_id: 0}));
+ assert.commandWorked(crudColl.insert({_id: 1}));
+
+ // Create a collection so it can be dropped on the rollback node.
+ node.getDB(dbName)[collToDropName].insert({_id: 0});
+};
+
+// We want to have the rollback node perform some inserts, updates, and deletes locally
+// during the rollback process, so we can ensure that transactions will read correct data
+// post-rollback, even though these writes will be un-timestamped.
+let RollbackOps = (node) => {
+ let crudColl = node.getDB(dbName)[crudCollName];
+ // Roll back an update (causes refetch and local update).
+ assert.commandWorked(crudColl.update({_id: 0}, {$set: {rollbackNode: 0}}));
+ // Roll back a delete (causes refetch and local insert).
+ assert.commandWorked(crudColl.remove({_id: 1}));
+ // Roll back an insert (causes local delete).
+ assert.commandWorked(crudColl.insert({_id: 2}));
+
+ // Roll back a drop (re-creates the collection).
+ node.getDB(dbName)[collToDropName].drop();
+};
+
+let SyncSourceOps = (node) => {
+ let coll = node.getDB(dbName)[crudCollName];
+ // Update these docs so the rollback node will refetch them.
+ assert.commandWorked(coll.update({_id: 0}, {$set: {syncSource: 0}}));
+ assert.commandWorked(coll.update({_id: 1}, {$set: {syncSource: 1}}));
+};
+
+// Set up a replica set for use in RollbackTest. We disable majority reads on all nodes so that
+// they will use the "rollbackViaRefetch" algorithm.
+let replTest = new ReplSetTest({
+ name,
+ nodes: 3,
+ useBridge: true,
+ settings: {chainingAllowed: false},
+ nodeOptions: {enableMajorityReadConcern: "false"}
+});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[2].priority = 0;
+replTest.initiate(config);
+
+let rollbackTest = new RollbackTest(name, replTest);
+
+CommonOps(rollbackTest.getPrimary());
+
+let rollbackNode = rollbackTest.transitionToRollbackOperations();
+RollbackOps(rollbackNode);
+
+let syncSourceNode = rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
+SyncSourceOps(syncSourceNode);
+
+// Wait for rollback to finish.
+rollbackTest.transitionToSyncSourceOperationsDuringRollback();
+rollbackTest.transitionToSteadyStateOperations();
+
+// Make the rollback node primary so we can run transactions against it.
+rollbackTest.getTestFixture().stepUp(rollbackNode);
+
+jsTestLog("Testing transactions against the node that just rolled back.");
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = rollbackNode.getDB(dbName).getMongo().startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+let sessionColl = sessionDb[crudCollName];
+
+// Make sure we can do basic CRUD ops inside a transaction and read the data back correctly, pre
+// and post-commit.
+session.startTransaction();
+// Make sure we read from the snapshot correctly.
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
+ [{_id: 0, syncSource: 0}, {_id: 1, syncSource: 1}]);
+// Do some basic ops.
+assert.commandWorked(sessionColl.update({_id: 0}, {$set: {inTxn: 1}}));
+assert.commandWorked(sessionColl.remove({_id: 1}));
+assert.commandWorked(sessionColl.insert({_id: 2}));
+// Make sure we read the updated data correctly.
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
+ [{_id: 0, syncSource: 0, inTxn: 1}, {_id: 2}]);
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// Make sure data is visible after commit.
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(),
+ [{_id: 0, syncSource: 0, inTxn: 1}, {_id: 2}]);
+
+// Run a transaction that touches the collection that was re-created during rollback.
+sessionColl = sessionDb[collToDropName];
+session.startTransaction();
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 0}]);
+assert.commandWorked(sessionColl.update({_id: 0}, {$set: {inTxn: 1}}));
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// Make sure data is visible after commit.
+assert.docEq(sessionColl.find().sort({_id: 1}).toArray(), [{_id: 0, inTxn: 1}]);
+
+// Check the replica set.
+rollbackTest.stop();
}());
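
The shell transaction pattern these tests build on, as a short sketch (assuming a
connection `node`):

    const session = node.startSession({causalConsistency: false});
    const coll = session.getDatabase("test").myColl;
    session.startTransaction();
    assert.commandWorked(coll.insert({_id: 1}));
    // commitTransaction_forTesting returns the raw command result rather than
    // throwing, so it composes with assert.commandWorked.
    assert.commandWorked(session.commitTransaction_forTesting());
    session.endSession();
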
diff --git a/jstests/replsets/transactions_committed_with_tickets_exhausted.js b/jstests/replsets/transactions_committed_with_tickets_exhausted.js
index 9fe978a38d6..786d2b34777 100644
--- a/jstests/replsets/transactions_committed_with_tickets_exhausted.js
+++ b/jstests/replsets/transactions_committed_with_tickets_exhausted.js
@@ -5,92 +5,91 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
-
- load("jstests/libs/parallelTester.js"); // for ScopedThread
- load("jstests/core/txns/libs/prepare_helpers.js");
-
- // We set the number of write tickets to be a small value in order to avoid needing to spawn a
- // large number of threads to exhaust all of the available ones.
- const kNumWriteTickets = 5;
-
- const rst = new ReplSetTest({
- nodes: 1,
- nodeOptions: {
- setParameter: {
- wiredTigerConcurrentWriteTransactions: kNumWriteTickets,
-
- // Setting a transaction lifetime of 20 seconds works fine locally because the
- // threads which attempt to run the drop command are spawned quickly enough. This
- // might not be the case for Evergreen hosts and may need to be tuned accordingly.
- transactionLifetimeLimitSeconds: 20,
- }
+"use strict";
+
+load("jstests/libs/parallelTester.js"); // for ScopedThread
+load("jstests/core/txns/libs/prepare_helpers.js");
+
+// We set the number of write tickets to be a small value in order to avoid needing to spawn a
+// large number of threads to exhaust all of the available ones.
+const kNumWriteTickets = 5;
+
+const rst = new ReplSetTest({
+ nodes: 1,
+ nodeOptions: {
+ setParameter: {
+ wiredTigerConcurrentWriteTransactions: kNumWriteTickets,
+
+ // Setting a transaction lifetime of 20 seconds works fine locally because the
+ // threads which attempt to run the drop command are spawned quickly enough. This
+ // might not be the case for Evergreen hosts and may need to be tuned accordingly.
+ transactionLifetimeLimitSeconds: 20,
}
- });
- rst.startSet();
- rst.initiate();
+ }
+});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
- const session = primary.startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
+const session = primary.startSession({causalConsistency: false});
+const sessionDb = session.getDatabase("test");
- assert.commandWorked(db.runCommand({create: "mycoll"}));
+assert.commandWorked(db.runCommand({create: "mycoll"}));
- jsTestLog("Starting transaction");
- session.startTransaction();
- assert.commandWorked(sessionDb.mycoll.insert({}));
+jsTestLog("Starting transaction");
+session.startTransaction();
+assert.commandWorked(sessionDb.mycoll.insert({}));
- jsTestLog("Preparing transaction");
- let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
+jsTestLog("Preparing transaction");
+let prepareTimestamp = PrepareHelpers.prepareTransaction(session);
- const threads = [];
+const threads = [];
- for (let i = 0; i < kNumWriteTickets; ++i) {
- const thread = new ScopedThread(function(host) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
+for (let i = 0; i < kNumWriteTickets; ++i) {
+ const thread = new ScopedThread(function(host) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
- // Dropping a collection requires a database X lock and therefore blocks behind the
- // transaction committing or aborting.
- db.mycoll.drop();
+ // Dropping a collection requires a database X lock and therefore blocks behind the
+ // transaction committing or aborting.
+ db.mycoll.drop();
- return {ok: 1};
- } catch (e) {
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
- }, primary.host);
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+ }, primary.host);
+
+ threads.push(thread);
+ thread.start();
+}
+
+// We wait until all of the drop commands are waiting for a lock to know that we've exhausted
+// all of the available write tickets.
+assert.soon(
+ () => {
+ const ops = db.currentOp({"command.drop": "mycoll", waitingForLock: true});
+ return ops.inprog.length === kNumWriteTickets;
+ },
+ () => {
+ return `Didn't find ${kNumWriteTickets} drop commands running: ` + tojson(db.currentOp());
+ });
- threads.push(thread);
- thread.start();
- }
+// Should be able to successfully commit the transaction with the write tickets exhausted.
+jsTestLog("Committing transaction");
+assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
- // We wait until all of the drop commands are waiting for a lock to know that we've exhausted
- // all of the available write tickets.
- assert.soon(
- () => {
- const ops = db.currentOp({"command.drop": "mycoll", waitingForLock: true});
- return ops.inprog.length === kNumWriteTickets;
- },
- () => {
- return `Didn't find ${kNumWriteTickets} drop commands running: ` +
- tojson(db.currentOp());
- });
-
- // Should be able to successfully commit the transaction with the write tickets exhausted.
- jsTestLog("Committing transaction");
- assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));
-
- jsTestLog("Waiting for drop command to join");
- for (let thread of threads) {
- thread.join();
- }
+jsTestLog("Waiting for drop command to join");
+for (let thread of threads) {
+ thread.join();
+}
- for (let thread of threads) {
- assert.commandWorked(thread.returnData());
- }
+for (let thread of threads) {
+ assert.commandWorked(thread.returnData());
+}
- rst.stopSet();
+rst.stopSet();
})();
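
For reference, the prepare-then-commit pattern this test exercises can be sketched in isolation. This is a minimal sketch, not the test itself: it assumes a running single-node replica set and the standard jstests helper library; the collection name is illustrative.

    load("jstests/core/txns/libs/prepare_helpers.js");

    const rst = new ReplSetTest({nodes: 1});
    rst.startSet();
    rst.initiate();

    const session = rst.getPrimary().startSession({causalConsistency: false});
    const sessionDb = session.getDatabase("test");

    session.startTransaction();
    assert.commandWorked(sessionDb.mycoll.insert({x: 1}));

    // prepareTransaction returns the prepare timestamp; the commit must then be
    // issued with a commit timestamp at or after it, which the helper handles.
    const prepareTimestamp = PrepareHelpers.prepareTransaction(session);
    assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp));

    rst.stopSet();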
diff --git a/jstests/replsets/transactions_during_step_down.js b/jstests/replsets/transactions_during_step_down.js
index 99eb4223a41..eb6aa6dad6e 100644
--- a/jstests/replsets/transactions_during_step_down.js
+++ b/jstests/replsets/transactions_during_step_down.js
@@ -6,129 +6,129 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
-
- const testName = "txnsDuringStepDown";
- const dbName = testName;
- const collName = "testcoll";
-
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
- rst.startSet();
- rst.initiate();
-
- var primary = rst.getPrimary();
- var db = primary.getDB(dbName);
- var primaryAdmin = primary.getDB("admin");
- var primaryColl = db[collName];
- var collNss = primaryColl.getFullName();
-
- jsTestLog("Writing data to collection.");
- assert.writeOK(primaryColl.insert({_id: 'readOp'}, {"writeConcern": {"w": 2}}));
-
- TestData.dbName = dbName;
- TestData.collName = collName;
- TestData.skipRetryOnNetworkError = true;
-
- function startTxn({parallel: parallel = true}) {
- var txnFunc = () => {
- jsTestLog("Starting a new transaction.");
- const session = db.getMongo().startSession();
- const sessionDb = session.getDatabase(TestData.dbName);
- const sessionColl = sessionDb[TestData.collName];
- session.startTransaction({writeConcern: {w: "majority"}});
- print(TestData.cmd);
- eval(TestData.cmd);
-
- // Validate that the connection is not closed on step down.
- assert.commandWorked(db.adminCommand({ping: 1}));
- };
- return parallel ? startParallelShell(txnFunc, primary.port) : txnFunc();
- }
-
- function runStepDown() {
- jsTestLog("Making primary step down.");
- assert.commandWorked(primaryAdmin.runCommand({"replSetStepDown": 30 * 60, "force": true}));
-
- // Wait until the primary transitioned to SECONDARY state.
- rst.waitForState(primary, ReplSetTest.State.SECONDARY);
-
- jsTestLog("Validating data.");
- assert.docEq([{_id: 'readOp'}], primaryColl.find().toArray());
-
- jsTestLog("Making old primary eligible to be re-elected.");
- assert.commandWorked(primaryAdmin.runCommand({replSetFreeze: 0}));
- rst.getPrimary();
- }
-
- function testTxnFailsWithCode({
- op,
- failPoint: failPoint = 'hangAfterPreallocateSnapshot',
- nss: nss = dbName + '.$cmd',
- preOp: preOp = ''
- }) {
- jsTestLog("Enabling failPoint '" + failPoint + "' on primary.");
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: failPoint,
- data: {shouldContinueOnInterrupt: true},
- mode: "alwaysOn"
- }));
-
- // Start transaction.
- TestData.cmd = preOp +
- `assert.commandFailedWithCode(${op}, ErrorCodes.InterruptedDueToReplStateChange);`;
- const waitForTxnShell = startTxn({});
-
- jsTestLog("Waiting for primary to reach failPoint '" + failPoint + "'.");
- waitForCurOpByFailPoint(primaryAdmin, nss, failPoint);
-
- // Call step down & validate data.
- runStepDown();
-
- // Wait for transaction shell to join.
- waitForTxnShell();
-
- // Disable fail point.
- assert.commandWorked(primaryAdmin.runCommand({configureFailPoint: failPoint, mode: 'off'}));
- }
-
- function testAbortOrCommitTxnFailsWithCode(params) {
- params["preOp"] = `sessionColl.insert({_id: 'abortOrCommitTxnOp'});`;
- params["nss"] = "admin.$cmd";
- testTxnFailsWithCode(params);
- }
-
- jsTestLog("Testing stepdown during read transaction.");
- testTxnFailsWithCode({op: "sessionDb.runCommand({find: '" + collName + "', batchSize: 1})"});
-
- jsTestLog("Testing stepdown during write transaction.");
- testTxnFailsWithCode({op: "sessionColl.insert({_id: 'writeOp'})"});
-
- jsTestLog("Testing stepdown during read-write transaction.");
- testTxnFailsWithCode({
- op: "sessionDb.runCommand({findAndModify: '" + collName +
- "', query: {_id: 'readOp'}, remove: true})"
- });
-
- jsTestLog("Testing stepdown during commit transaction.");
- testAbortOrCommitTxnFailsWithCode(
- {failPoint: "hangBeforeCommitingTxn", op: "session.commitTransaction_forTesting()"});
-
- jsTestLog("Testing stepdown during abort transaction.");
- testAbortOrCommitTxnFailsWithCode(
- {failPoint: "hangBeforeAbortingTxn", op: "session.abortTransaction_forTesting()"});
-
- jsTestLog("Testing stepdown during running transaction in inactive state.");
- TestData.cmd = "assert.writeOK(sessionColl.insert({_id: 'inactiveTxnOp'}))";
- // Do not start the transaction in a parallel shell because, when the parallel
- // shell's work is done, implicit calls to the "endSessions" and "abortTransaction"
- // commands are made. So, during step down we might not have any running
- // transaction to interrupt.
- startTxn({parallel: false});
+"use strict";
+
+load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
+
+const testName = "txnsDuringStepDown";
+const dbName = testName;
+const collName = "testcoll";
+
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
+rst.startSet();
+rst.initiate();
+
+var primary = rst.getPrimary();
+var db = primary.getDB(dbName);
+var primaryAdmin = primary.getDB("admin");
+var primaryColl = db[collName];
+var collNss = primaryColl.getFullName();
+
+jsTestLog("Writing data to collection.");
+assert.writeOK(primaryColl.insert({_id: 'readOp'}, {"writeConcern": {"w": 2}}));
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+TestData.skipRetryOnNetworkError = true;
+
+function startTxn({parallel: parallel = true}) {
+ var txnFunc = () => {
+ jsTestLog("Starting a new transaction.");
+ const session = db.getMongo().startSession();
+ const sessionDb = session.getDatabase(TestData.dbName);
+ const sessionColl = sessionDb[TestData.collName];
+ session.startTransaction({writeConcern: {w: "majority"}});
+ print(TestData.cmd);
+ eval(TestData.cmd);
+
+ // Validate that the connection is not closed on step down.
+ assert.commandWorked(db.adminCommand({ping: 1}));
+ };
+ return parallel ? startParallelShell(txnFunc, primary.port) : txnFunc();
+}
+
+function runStepDown() {
+ jsTestLog("Making primary step down.");
+ assert.commandWorked(primaryAdmin.runCommand({"replSetStepDown": 30 * 60, "force": true}));
+
+ // Wait until the primary transitioned to SECONDARY state.
+ rst.waitForState(primary, ReplSetTest.State.SECONDARY);
+
+ jsTestLog("Validating data.");
+ assert.docEq([{_id: 'readOp'}], primaryColl.find().toArray());
+
+ jsTestLog("Making old primary eligible to be re-elected.");
+ assert.commandWorked(primaryAdmin.runCommand({replSetFreeze: 0}));
+ rst.getPrimary();
+}
+
+function testTxnFailsWithCode({
+ op,
+ failPoint: failPoint = 'hangAfterPreallocateSnapshot',
+ nss: nss = dbName + '.$cmd',
+ preOp: preOp = ''
+}) {
+ jsTestLog("Enabling failPoint '" + failPoint + "' on primary.");
+ assert.commandWorked(primary.adminCommand({
+ configureFailPoint: failPoint,
+ data: {shouldContinueOnInterrupt: true},
+ mode: "alwaysOn"
+ }));
+
+ // Start transaction.
+ TestData.cmd =
+ preOp + `assert.commandFailedWithCode(${op}, ErrorCodes.InterruptedDueToReplStateChange);`;
+ const waitForTxnShell = startTxn({});
+
+ jsTestLog("Waiting for primary to reach failPoint '" + failPoint + "'.");
+ waitForCurOpByFailPoint(primaryAdmin, nss, failPoint);
// Call step down & validate data.
runStepDown();
- rst.stopSet();
+ // Wait for transaction shell to join.
+ waitForTxnShell();
+
+ // Disable fail point.
+ assert.commandWorked(primaryAdmin.runCommand({configureFailPoint: failPoint, mode: 'off'}));
+}
+
+function testAbortOrCommitTxnFailsWithCode(params) {
+ params["preOp"] = `sessionColl.insert({_id: 'abortOrCommitTxnOp'});`;
+ params["nss"] = "admin.$cmd";
+ testTxnFailsWithCode(params);
+}
+
+jsTestLog("Testing stepdown during read transaction.");
+testTxnFailsWithCode({op: "sessionDb.runCommand({find: '" + collName + "', batchSize: 1})"});
+
+jsTestLog("Testing stepdown during write transaction.");
+testTxnFailsWithCode({op: "sessionColl.insert({_id: 'writeOp'})"});
+
+jsTestLog("Testing stepdown during read-write transaction.");
+testTxnFailsWithCode({
+ op: "sessionDb.runCommand({findAndModify: '" + collName +
+ "', query: {_id: 'readOp'}, remove: true})"
+});
+
+jsTestLog("Testing stepdown during commit transaction.");
+testAbortOrCommitTxnFailsWithCode(
+ {failPoint: "hangBeforeCommitingTxn", op: "session.commitTransaction_forTesting()"});
+
+jsTestLog("Testing stepdown during abort transaction.");
+testAbortOrCommitTxnFailsWithCode(
+ {failPoint: "hangBeforeAbortingTxn", op: "session.abortTransaction_forTesting()"});
+
+jsTestLog("Testing stepdown during running transaction in inactive state.");
+TestData.cmd = "assert.writeOK(sessionColl.insert({_id: 'inactiveTxnOp'}))";
+// Do not start the transaction in a parallel shell because, when the parallel
+// shell's work is done, implicit calls to the "endSessions" and "abortTransaction"
+// commands are made. So, during step down we might not have any running
+// transaction to interrupt.
+startTxn({parallel: false});
+
+// Call step down & validate data.
+runStepDown();
+
+rst.stopSet();
})();
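
The stepdown-and-recover sequence inside runStepDown() above is a reusable pattern on its own. A minimal sketch under the same assumptions as the test (a two-node set whose secondary has priority 0):

    const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}]});
    rst.startSet();
    rst.initiate();

    const primary = rst.getPrimary();

    // force: true lets the primary step down even if no secondary is fully caught up.
    assert.commandWorked(primary.adminCommand({replSetStepDown: 30 * 60, force: true}));
    rst.waitForState(primary, ReplSetTest.State.SECONDARY);

    // Clear the freeze period so the old primary is eligible for re-election,
    // then wait for some node to become primary again.
    assert.commandWorked(primary.adminCommand({replSetFreeze: 0}));
    rst.getPrimary();

    rst.stopSet();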
diff --git a/jstests/replsets/transactions_on_secondaries_not_allowed.js b/jstests/replsets/transactions_on_secondaries_not_allowed.js
index 1dfd1afdddd..59784afe1f3 100644
--- a/jstests/replsets/transactions_on_secondaries_not_allowed.js
+++ b/jstests/replsets/transactions_on_secondaries_not_allowed.js
@@ -5,83 +5,86 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
+
+const dbName = "test";
+const collName = "transactions_on_secondaries_not_allowed";
+
+const rst = new ReplSetTest({name: collName, nodes: 2});
+rst.startSet({verbose: 3});
+// We want a stable topology, so make the secondary unelectable.
+let config = rst.getReplSetConfig();
+config.members[1].priority = 0;
+rst.initiate(config);
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+const secondaryTestDB = secondary.getDB(dbName);
+
+// Do an initial write so we have something to find.
+const initialDoc = {
+ _id: 0
+};
+assert.commandWorked(primary.getDB(dbName)[collName].insert(initialDoc));
+rst.awaitLastOpCommitted();
+
+// Disable the best-effort check for primary-ness in the service entry point, so that we
+// exercise the real check for primary-ness in TransactionParticipant::beginOrContinue.
+assert.commandWorked(secondary.adminCommand(
+ {configureFailPoint: "skipCheckingForNotMasterInCommandDispatch", mode: "alwaysOn"}));
+
+// Initiate a session on the secondary.
+const sessionOptions = {
+ causalConsistency: false,
+ retryWrites: true
+};
+const session = secondaryTestDB.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(dbName);
- const dbName = "test";
- const collName = "transactions_on_secondaries_not_allowed";
-
- const rst = new ReplSetTest({name: collName, nodes: 2});
- rst.startSet({verbose: 3});
- // We want a stable topology, so make the secondary unelectable.
- let config = rst.getReplSetConfig();
- config.members[1].priority = 0;
- rst.initiate(config);
-
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- const secondaryTestDB = secondary.getDB(dbName);
-
- // Do an initial write so we have something to find.
- const initialDoc = {_id: 0};
- assert.commandWorked(primary.getDB(dbName)[collName].insert(initialDoc));
- rst.awaitLastOpCommitted();
-
- // Disable the best-effort check for primary-ness in the service entry point, so that we
- // exercise the real check for primary-ness in TransactionParticipant::beginOrContinue.
- assert.commandWorked(secondary.adminCommand(
- {configureFailPoint: "skipCheckingForNotMasterInCommandDispatch", mode: "alwaysOn"}));
-
- // Initiate a session on the secondary.
- const sessionOptions = {causalConsistency: false, retryWrites: true};
- const session = secondaryTestDB.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(dbName);
-
- /**
- * Test starting a transaction and issuing a commitTransaction command.
- */
+/**
+ * Test starting a transaction and issuing a commitTransaction command.
+ */
- jsTestLog("Start a read-only transaction on the secondary.");
- session.startTransaction({readConcern: {level: "snapshot"}});
+jsTestLog("Start a read-only transaction on the secondary.");
+session.startTransaction({readConcern: {level: "snapshot"}});
- // Try to read a document (the first statement in the transaction) and verify that this fails.
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotMaster);
+// Try to read a document (the first statement in the transaction) and verify that this fails.
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotMaster);
- // The check for "NotMaster" supersedes the check for "NoSuchTransaction" in this case.
- jsTestLog(
- "Make sure we are not allowed to run the commitTransaction command on the secondary.");
- assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NotMaster);
+// The check for "NotMaster" supersedes the check for "NoSuchTransaction" in this case.
+jsTestLog("Make sure we are not allowed to run the commitTransaction command on the secondary.");
+assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NotMaster);
- /**
- * Test starting a transaction and issuing an abortTransaction command.
- */
+/**
+ * Test starting a transaction and issuing an abortTransaction command.
+ */
- jsTestLog("Start a different read-only transaction on the secondary.");
- session.startTransaction({readConcern: {level: "snapshot"}});
+jsTestLog("Start a different read-only transaction on the secondary.");
+session.startTransaction({readConcern: {level: "snapshot"}});
- // Try to read a document (the first statement in the transaction) and verify that this fails.
- assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotMaster);
+// Try to read a document (the first statement in the transaction) and verify that this fails.
+assert.commandFailedWithCode(sessionDb.runCommand({find: collName}), ErrorCodes.NotMaster);
- // The check for "NotMaster" supersedes the check for "NoSuchTransaction" in this case.
- jsTestLog("Make sure we are not allowed to run the abortTransaction command on the secondary.");
- assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NotMaster);
+// The check for "NotMaster" supersedes the check for "NoSuchTransaction" in this case.
+jsTestLog("Make sure we are not allowed to run the abortTransaction command on the secondary.");
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NotMaster);
- /**
- * Test starting a retryable write.
- */
+/**
+ * Test starting a retryable write.
+ */
- jsTestLog("Start a retryable write");
- assert.commandFailedWithCode(sessionDb.foo.insert({_id: 0}), ErrorCodes.NotMaster);
+jsTestLog("Start a retryable write");
+assert.commandFailedWithCode(sessionDb.foo.insert({_id: 0}), ErrorCodes.NotMaster);
- /**
- * Test starting a read with txnNumber, but without autocommit. This fails in general because
- * txnNumber isn't supported for the find command outside of transactions, but we check that
- * this fails on a secondary.
- */
+/**
+ * Test starting a read with txnNumber, but without autocommit. This fails in general because
+ * txnNumber isn't supported for the find command outside of transactions, but we check that
+ * this fails on a secondary.
+ */
- jsTestLog("Start a read with txnNumber but without autocommit");
- assert.commandFailedWithCode(sessionDb.runCommand({find: 'foo', txnNumber: NumberLong(10)}),
- 50768);
+jsTestLog("Start a read with txnNumber but without autocommit");
+assert.commandFailedWithCode(sessionDb.runCommand({find: 'foo', txnNumber: NumberLong(10)}), 50768);
- session.endSession();
- rst.stopSet(undefined, false, {skipValidation: true});
+session.endSession();
+rst.stopSet(undefined, false, {skipValidation: true});
}());
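
The failpoint toggling above follows the usual configureFailPoint lifecycle. Sketched in isolation, with `conn` assumed to be a direct connection to the node and the failpoint name taken from the test:

    // Enable the failpoint for the duration of the commands of interest...
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "skipCheckingForNotMasterInCommandDispatch", mode: "alwaysOn"}));

    // ... run the commands whose behavior the failpoint changes ...

    // ... and disable it again so later operations see normal behavior.
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: "skipCheckingForNotMasterInCommandDispatch", mode: "off"}));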
diff --git a/jstests/replsets/transactions_only_allowed_on_primaries.js b/jstests/replsets/transactions_only_allowed_on_primaries.js
index 3d0633cc423..2ca360eca41 100644
--- a/jstests/replsets/transactions_only_allowed_on_primaries.js
+++ b/jstests/replsets/transactions_only_allowed_on_primaries.js
@@ -4,128 +4,128 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- // In 4.0, we allow read-only transactions on secondaries when test commands are enabled, so we
- // disable test commands in this test to verify that transactions on secondaries are disallowed
- // for production users.
- jsTest.setOption('enableTestCommands', false);
- TestData.roleGraphInvalidationIsFatal = false;
- TestData.authenticationDatabase = "local";
-
- const dbName = "test";
- const collName = "transactions_only_allowed_on_primaries";
-
- // Start up the replica set. We want a stable topology, so make the secondary unelectable.
- const replTest = new ReplSetTest({name: collName, nodes: 2});
- replTest.startSet();
- let config = replTest.getReplSetConfig();
- config.members[1].priority = 0;
- replTest.initiate(config);
-
- const primary = replTest.getPrimary();
- const secondary = replTest.getSecondary();
-
- // Set slaveOk=true so that normal read commands would be allowed on the secondary.
- secondary.setSlaveOk(true);
-
- // Create a test collection that we can run commands against.
- const primaryDB = primary.getDB(dbName);
- assert.commandWorked(primary.getDB(dbName).createCollection(collName));
- assert.commandWorked(primaryDB.runCommand({
- createIndexes: collName,
- indexes: [
- {name: "geo_2d", key: {geo: "2d"}},
- {key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}
- ]
- }));
- replTest.awaitLastOpCommitted();
-
- /**
- * Verify that all given commands are disallowed from starting a transaction on a secondary by
- * checking that each command fails with the expected error code.
- */
- function testCommands(session, commands, expectedErrorCode, readPref) {
- const sessionDb = session.getDatabase(dbName);
- for (let i = 0; i < commands.length; i++) {
- session.startTransaction();
- // Use a read preference that would normally allow read commands to run on secondaries.
- if (readPref !== null) {
- session.getOptions().setReadPreference(readPref);
- }
- const cmdObject = commands[i];
-
- jsTestLog("Trying to start transaction on secondary with command: " +
- tojson(cmdObject));
- assert.commandFailedWithCode(sessionDb.runCommand(cmdObject), expectedErrorCode);
-
- // Call abort for good measure, even though the transaction should have already been
- // aborted on the server.
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NotMaster);
- }
- }
-
- //
- // Make sure transactions are disallowed on secondaries.
- //
-
- // Initiate a session on the secondary.
- const sessionOptions = {causalConsistency: false};
- const secondarySession = secondary.getDB(dbName).getMongo().startSession(sessionOptions);
-
- // Test read commands that are supported in transactions.
- let readCommands = [
- {find: collName},
- {aggregate: collName, pipeline: [{$project: {_id: 1}}], cursor: {}},
- {distinct: collName, key: "_id"},
- {geoSearch: collName, near: [0, 0]}
- ];
-
- jsTestLog("Testing read commands.");
- // Make sure read commands can not start transactions with any supported read preference.
- testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "secondary");
- testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "secondaryPreferred");
- testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "primaryPreferred");
- testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, null);
-
- // Test one write command. Normal write commands should already be
- // disallowed on secondaries so we don't test them exhaustively here.
- let writeCommands = [{insert: collName, documents: [{_id: 0}]}];
-
- jsTestLog("Testing write commands.");
- testCommands(secondarySession, writeCommands, ErrorCodes.NotMaster, "secondary");
-
- secondarySession.endSession();
+"use strict";
+
+// In 4.0, we allow read-only transactions on secondaries when test commands are enabled, so we
+// disable test commands in this test to verify that transactions on secondaries are disallowed
+// for production users.
+jsTest.setOption('enableTestCommands', false);
+TestData.roleGraphInvalidationIsFatal = false;
+TestData.authenticationDatabase = "local";
+
+const dbName = "test";
+const collName = "transactions_only_allowed_on_primaries";
+
+// Start up the replica set. We want a stable topology, so make the secondary unelectable.
+const replTest = new ReplSetTest({name: collName, nodes: 2});
+replTest.startSet();
+let config = replTest.getReplSetConfig();
+config.members[1].priority = 0;
+replTest.initiate(config);
+
+const primary = replTest.getPrimary();
+const secondary = replTest.getSecondary();
+
+// Set slaveOk=true so that normal read commands would be allowed on the secondary.
+secondary.setSlaveOk(true);
+
+// Create a test collection that we can run commands against.
+const primaryDB = primary.getDB(dbName);
+assert.commandWorked(primary.getDB(dbName).createCollection(collName));
+assert.commandWorked(primaryDB.runCommand({
+ createIndexes: collName,
+ indexes: [
+ {name: "geo_2d", key: {geo: "2d"}},
+ {key: {haystack: "geoHaystack", a: 1}, name: "haystack_geo", bucketSize: 1}
+ ]
+}));
+replTest.awaitLastOpCommitted();
- //
- // Make sure transactions are allowed on primaries with any valid read preference.
- //
-
- const primarySession = primary.getDB(dbName).getMongo().startSession(sessionOptions);
- const primarySessionDb = primarySession.getDatabase(dbName);
-
- primarySession.startTransaction();
- assert.commandWorked(
- primarySessionDb.runCommand({find: collName, $readPreference: {mode: "primary"}}));
- assert.commandWorked(primarySession.commitTransaction_forTesting());
-
- primarySession.startTransaction();
- assert.commandWorked(
- primarySessionDb.runCommand({find: collName, $readPreference: {mode: "primaryPreferred"}}));
- assert.commandWorked(primarySession.commitTransaction_forTesting());
-
- primarySession.startTransaction();
- assert.commandWorked(primarySessionDb.runCommand(
- {find: collName, $readPreference: {mode: "secondaryPreferred"}}));
- assert.commandWorked(primarySession.commitTransaction_forTesting());
-
- primarySession.startTransaction();
- assert.commandWorked(
- primarySessionDb.runCommand({find: collName, $readPreference: {mode: "nearest"}}));
- assert.commandWorked(primarySession.commitTransaction_forTesting());
+/**
+ * Verify that all given commands are disallowed from starting a transaction on a secondary by
+ * checking that each command fails with the expected error code.
+ */
+function testCommands(session, commands, expectedErrorCode, readPref) {
+ const sessionDb = session.getDatabase(dbName);
+ for (let i = 0; i < commands.length; i++) {
+ session.startTransaction();
+ // Use a read preference that would normally allow read commands to run on secondaries.
+ if (readPref !== null) {
+ session.getOptions().setReadPreference(readPref);
+ }
+ const cmdObject = commands[i];
- primarySession.endSession();
+ jsTestLog("Trying to start transaction on secondary with command: " + tojson(cmdObject));
+ assert.commandFailedWithCode(sessionDb.runCommand(cmdObject), expectedErrorCode);
- replTest.stopSet();
+ // Call abort for good measure, even though the transaction should have already been
+ // aborted on the server.
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NotMaster);
+ }
+}
+
+//
+// Make sure transactions are disallowed on secondaries.
+//
+
+// Initiate a session on the secondary.
+const sessionOptions = {
+ causalConsistency: false
+};
+const secondarySession = secondary.getDB(dbName).getMongo().startSession(sessionOptions);
+
+// Test read commands that are supported in transactions.
+let readCommands = [
+ {find: collName},
+ {aggregate: collName, pipeline: [{$project: {_id: 1}}], cursor: {}},
+ {distinct: collName, key: "_id"},
+ {geoSearch: collName, near: [0, 0]}
+];
+
+jsTestLog("Testing read commands.");
+// Make sure read commands can not start transactions with any supported read preference.
+testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "secondary");
+testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "secondaryPreferred");
+testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, "primaryPreferred");
+testCommands(secondarySession, readCommands, ErrorCodes.NotMaster, null);
+
+// Test one write command. Normal write commands should already be
+// disallowed on secondaries so we don't test them exhaustively here.
+let writeCommands = [{insert: collName, documents: [{_id: 0}]}];
+
+jsTestLog("Testing write commands.");
+testCommands(secondarySession, writeCommands, ErrorCodes.NotMaster, "secondary");
+
+secondarySession.endSession();
+
+//
+// Make sure transactions are allowed on primaries with any valid read preference.
+//
+
+const primarySession = primary.getDB(dbName).getMongo().startSession(sessionOptions);
+const primarySessionDb = primarySession.getDatabase(dbName);
+
+primarySession.startTransaction();
+assert.commandWorked(
+ primarySessionDb.runCommand({find: collName, $readPreference: {mode: "primary"}}));
+assert.commandWorked(primarySession.commitTransaction_forTesting());
+
+primarySession.startTransaction();
+assert.commandWorked(
+ primarySessionDb.runCommand({find: collName, $readPreference: {mode: "primaryPreferred"}}));
+assert.commandWorked(primarySession.commitTransaction_forTesting());
+
+primarySession.startTransaction();
+assert.commandWorked(
+ primarySessionDb.runCommand({find: collName, $readPreference: {mode: "secondaryPreferred"}}));
+assert.commandWorked(primarySession.commitTransaction_forTesting());
+
+primarySession.startTransaction();
+assert.commandWorked(
+ primarySessionDb.runCommand({find: collName, $readPreference: {mode: "nearest"}}));
+assert.commandWorked(primarySession.commitTransaction_forTesting());
+
+primarySession.endSession();
+
+replTest.stopSet();
}());
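
The four primary-side blocks above differ only in the read preference mode. An equivalent, more compact sketch, reusing the test's own session and collection names:

    for (let mode of ["primary", "primaryPreferred", "secondaryPreferred", "nearest"]) {
        primarySession.startTransaction();
        assert.commandWorked(
            primarySessionDb.runCommand({find: collName, $readPreference: {mode: mode}}));
        assert.commandWorked(primarySession.commitTransaction_forTesting());
    }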
diff --git a/jstests/replsets/transactions_reaped_with_tickets_exhausted.js b/jstests/replsets/transactions_reaped_with_tickets_exhausted.js
index 905890af13d..3d46be27be7 100644
--- a/jstests/replsets/transactions_reaped_with_tickets_exhausted.js
+++ b/jstests/replsets/transactions_reaped_with_tickets_exhausted.js
@@ -5,91 +5,90 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/parallelTester.js"); // for ScopedThread
+load("jstests/libs/parallelTester.js"); // for ScopedThread
- // We set the number of write tickets to be a small value in order to avoid needing to spawn a
- // large number of threads to exhaust all of the available ones.
- const kNumWriteTickets = 5;
+// We set the number of write tickets to be a small value in order to avoid needing to spawn a
+// large number of threads to exhaust all of the available ones.
+const kNumWriteTickets = 5;
- const rst = new ReplSetTest({
- nodes: 1,
- nodeOptions: {
- setParameter: {
- wiredTigerConcurrentWriteTransactions: kNumWriteTickets,
+const rst = new ReplSetTest({
+ nodes: 1,
+ nodeOptions: {
+ setParameter: {
+ wiredTigerConcurrentWriteTransactions: kNumWriteTickets,
- // Setting a transaction lifetime of 1 hour to make sure the transaction reaper
- // doesn't abort the transaction.
- transactionLifetimeLimitSeconds: 3600,
- }
+ // Setting a transaction lifetime of 1 hour to make sure the transaction reaper
+ // doesn't abort the transaction.
+ transactionLifetimeLimitSeconds: 3600,
}
- });
- rst.startSet();
- rst.initiate();
+ }
+});
+rst.startSet();
+rst.initiate();
- const primary = rst.getPrimary();
- const db = primary.getDB("test");
+const primary = rst.getPrimary();
+const db = primary.getDB("test");
- const session = primary.startSession({causalConsistency: false});
- const sessionDb = session.getDatabase("test");
+const session = primary.startSession({causalConsistency: false});
+const sessionDb = session.getDatabase("test");
- assert.commandWorked(db.runCommand({create: "mycoll"}));
+assert.commandWorked(db.runCommand({create: "mycoll"}));
- session.startTransaction();
- assert.commandWorked(sessionDb.mycoll.insert({}));
+session.startTransaction();
+assert.commandWorked(sessionDb.mycoll.insert({}));
- const threads = [];
+const threads = [];
- for (let i = 0; i < kNumWriteTickets; ++i) {
- const thread = new ScopedThread(function(host) {
- try {
- const conn = new Mongo(host);
- const db = conn.getDB("test");
+for (let i = 0; i < kNumWriteTickets; ++i) {
+ const thread = new ScopedThread(function(host) {
+ try {
+ const conn = new Mongo(host);
+ const db = conn.getDB("test");
- // Dropping a collection requires a database X lock and therefore blocks behind the
- // transaction committing or aborting.
- db.mycoll.drop();
+ // Dropping a collection requires a database X lock and therefore blocks behind the
+ // transaction committing or aborting.
+ db.mycoll.drop();
- return {ok: 1};
- } catch (e) {
- return {ok: 0, error: e.toString(), stack: e.stack};
- }
- }, primary.host);
+ return {ok: 1};
+ } catch (e) {
+ return {ok: 0, error: e.toString(), stack: e.stack};
+ }
+ }, primary.host);
+
+ threads.push(thread);
+ thread.start();
+}
+
+// We wait until all of the drop commands are waiting for a lock to know that we've exhausted
+// all of the available write tickets.
+assert.soon(
+ () => {
+ const ops = db.currentOp({"command.drop": "mycoll", waitingForLock: true});
+ return ops.inprog.length === kNumWriteTickets;
+ },
+ () => {
+ return `Didn't find ${kNumWriteTickets} drop commands running: ` + tojson(db.currentOp());
+ });
- threads.push(thread);
- thread.start();
- }
+// Attempting to perform another operation inside of the transaction will block and should
+// cause it to be aborted implicitly.
+assert.commandFailedWithCode(sessionDb.mycoll.insert({}), ErrorCodes.LockTimeout);
- // We wait until all of the drop commands are waiting for a lock to know that we've exhausted
- // all of the available write tickets.
- assert.soon(
- () => {
- const ops = db.currentOp({"command.drop": "mycoll", waitingForLock: true});
- return ops.inprog.length === kNumWriteTickets;
- },
- () => {
- return `Didn't find ${kNumWriteTickets} drop commands running: ` +
- tojson(db.currentOp());
- });
-
- // Attempting to perform another operation inside of the transaction will block and should
- // cause it to be aborted implicity.
- assert.commandFailedWithCode(sessionDb.mycoll.insert({}), ErrorCodes.LockTimeout);
-
- for (let thread of threads) {
- thread.join();
- }
+for (let thread of threads) {
+ thread.join();
+}
- for (let thread of threads) {
- assert.commandWorked(thread.returnData());
- }
+for (let thread of threads) {
+ assert.commandWorked(thread.returnData());
+}
- // Transaction should already be aborted.
- let res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert(res.errmsg.match(/Transaction .* has been aborted/), res.errmsg);
+// Transaction should already be aborted.
+let res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+assert(res.errmsg.match(/Transaction .* has been aborted/), res.errmsg);
- session.endSession();
- rst.stopSet();
+session.endSession();
+rst.stopSet();
})();
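
ScopedThread, loaded from parallelTester.js above, is the mechanism both ticket-exhaustion tests use to tie up write tickets from separate connections. A minimal sketch of its lifecycle, assuming `host` is a connectable host string:

    load("jstests/libs/parallelTester.js");

    // The thread body runs in a separate shell context, so it must open its own
    // connection and can only return BSON-serializable data.
    const thread = new ScopedThread(function(host) {
        try {
            const conn = new Mongo(host);
            return {ok: 1, count: conn.getDB("test").mycoll.find().itcount()};
        } catch (e) {
            return {ok: 0, error: e.toString(), stack: e.stack};
        }
    }, host);

    thread.start();
    thread.join();
    assert.commandWorked(thread.returnData());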
diff --git a/jstests/replsets/transactions_wait_for_write_concern.js b/jstests/replsets/transactions_wait_for_write_concern.js
index bf08d30f1f5..12d1154a28a 100644
--- a/jstests/replsets/transactions_wait_for_write_concern.js
+++ b/jstests/replsets/transactions_wait_for_write_concern.js
@@ -9,196 +9,196 @@
* @tags: [uses_transactions, uses_prepare_transaction]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js");
- load("jstests/core/txns/libs/prepare_helpers.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/core/txns/libs/prepare_helpers.js");
- const dbName = "test";
- const collNameBase = "coll";
+const dbName = "test";
+const collNameBase = "coll";
- const rst = new ReplSetTest({
- nodes: [{}, {rsConfig: {priority: 0}}],
+const rst = new ReplSetTest({
+ nodes: [{}, {rsConfig: {priority: 0}}],
+});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB(dbName);
+
+const failTimeoutMS = 1000;
+const successTimeoutMS = ReplSetTest.kDefaultTimeoutMS;
+
+function runTest(readConcernLevel) {
+ jsTestLog("Testing " + readConcernLevel);
+
+ const collName = `${collNameBase}_${readConcernLevel}`;
+ assert.commandWorked(primaryDB[collName].insert(
+ [{x: 1}, {x: 2}, {x: 3}, {x: 4}, {x: 5}, {x: 6}], {writeConcern: {w: "majority"}}));
+
+ jsTestLog("Unprepared Abort Setup");
+ const mongo1 = new Mongo(primary.host);
+ const session1 = mongo1.startSession();
+ const sessionDB1 = session1.getDatabase(dbName);
+ session1.startTransaction({
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate1 = {update: collName, updates: [{q: {x: 1}, u: {$set: {x: 1}}}]};
+ printjson(assert.commandWorked(sessionDB1.runCommand(fruitlessUpdate1)));
+
+ jsTestLog("Prepared Abort Setup");
+ const mongo2 = new Mongo(primary.host);
+ const session2 = mongo2.startSession();
+ const sessionDB2 = session2.getDatabase(dbName);
+ session2.startTransaction({
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate2 = {update: collName, updates: [{q: {x: 2}, u: {$set: {x: 2}}}]};
+ printjson(assert.commandWorked(sessionDB2.runCommand(fruitlessUpdate2)));
+ PrepareHelpers.prepareTransaction(session2);
+
+ jsTestLog("Prepare Setup");
+ const mongo3 = new Mongo(primary.host);
+ const session3 = mongo3.startSession();
+ const sessionDB3 = session3.getDatabase(dbName);
+ session3.startTransaction({
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate3 = {update: collName, updates: [{q: {x: 3}, u: {$set: {x: 3}}}]};
+ printjson(assert.commandWorked(sessionDB3.runCommand(fruitlessUpdate3)));
+
+ jsTestLog("Unprepared Commit Setup");
+ const mongo4 = new Mongo(primary.host);
+ const session4 = mongo4.startSession();
+ const sessionDB4 = session4.getDatabase(dbName);
+ session4.startTransaction({
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate4 = {update: collName, updates: [{q: {x: 4}, u: {$set: {x: 4}}}]};
+ printjson(assert.commandWorked(sessionDB4.runCommand(fruitlessUpdate4)));
+
+ jsTestLog("Prepared Commit Setup");
+ const mongo5 = new Mongo(primary.host);
+ const session5 = mongo5.startSession();
+ const sessionDB5 = session5.getDatabase(dbName);
+ session5.startTransaction({
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate5 = {update: collName, updates: [{q: {x: 5}, u: {$set: {x: 5}}}]};
+ printjson(assert.commandWorked(sessionDB5.runCommand(fruitlessUpdate5)));
+ let prepareTS5 = PrepareHelpers.prepareTransaction(session5);
+
+ jsTestLog("Unprepared Abort On Used Connection Setup");
+ const session6 = primary.getDB("admin").getMongo().startSession();
+ const sessionDB6 = session6.getDatabase(dbName);
+ session6.startTransaction({
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ const fruitlessUpdate6 = {update: collName, updates: [{q: {x: 6}, u: {$set: {x: 6}}}]};
+ printjson(assert.commandWorked(sessionDB6.runCommand(fruitlessUpdate6)));
+
+ jsTestLog("Stop replication");
+ stopReplicationOnSecondaries(rst);
+
+ jsTestLog("Advance OpTime on primary, with replication stopped");
+
+ printjson(assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{}]})));
+
+ jsTestLog("Run test commands, with replication stopped");
+
+ jsTestLog("Unprepared Abort Test");
+ assert.commandWorked(session1.abortTransaction_forTesting());
+
+ jsTestLog("Prepared Abort Test");
+ assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ jsTestLog("Prepare Test");
+ assert.commandFailedWithCode(
+ session3.getDatabase('admin').adminCommand(
+ {prepareTransaction: 1, writeConcern: {w: "majority", wtimeout: failTimeoutMS}}),
+ ErrorCodes.WriteConcernFailed);
+ assert.commandFailedWithCode(session3.abortTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ jsTestLog("Unprepared Commit Test");
+ assert.commandFailedWithCode(session4.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ jsTestLog("Prepared Commit Test");
+ assert.commandFailedWithCode(session5.getDatabase('admin').adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTS5,
+ writeConcern: {w: "majority", wtimeout: failTimeoutMS}
+ }),
+ ErrorCodes.WriteConcernFailed);
+ // Send commit with the shell helper to reset the shell's state.
+ assert.commandFailedWithCode(session5.commitTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ jsTestLog("Unprepared Abort On Used Connection Test");
+ assert.commandFailedWithCode(session6.abortTransaction_forTesting(),
+ ErrorCodes.WriteConcernFailed);
+
+ jsTestLog("Restart replication");
+ restartReplicationOnSecondaries(rst);
+
+ jsTestLog("Try transaction with replication enabled");
+
+ // Unprepared Abort.
+ session1.startTransaction({
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ assert.commandWorked(sessionDB1.runCommand(fruitlessUpdate1));
+ assert.commandWorked(session1.abortTransaction_forTesting());
+
+ // Prepared Abort.
+ session2.startTransaction({
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ assert.commandWorked(sessionDB2.runCommand(fruitlessUpdate2));
+ PrepareHelpers.prepareTransaction(session2);
+ assert.commandWorked(session2.abortTransaction_forTesting());
+
+ // Testing prepare is no different than prepared abort or prepared commit.
+
+ // Unprepared Commit.
+ session4.startTransaction({
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS},
+ readConcern: {level: readConcernLevel}
+ });
+ assert.commandWorked(sessionDB4.runCommand(fruitlessUpdate4));
+ assert.commandWorked(session4.commitTransaction_forTesting());
+
+ // Prepared Commit.
+ session5.startTransaction({
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS},
+ readConcern: {level: readConcernLevel}
});
- rst.startSet();
- rst.initiate();
-
- const primary = rst.getPrimary();
- const primaryDB = primary.getDB(dbName);
-
- const failTimeoutMS = 1000;
- const successTimeoutMS = ReplSetTest.kDefaultTimeoutMS;
-
- function runTest(readConcernLevel) {
- jsTestLog("Testing " + readConcernLevel);
-
- const collName = `${collNameBase}_${readConcernLevel}`;
- assert.commandWorked(primaryDB[collName].insert(
- [{x: 1}, {x: 2}, {x: 3}, {x: 4}, {x: 5}, {x: 6}], {writeConcern: {w: "majority"}}));
-
- jsTestLog("Unprepared Abort Setup");
- const mongo1 = new Mongo(primary.host);
- const session1 = mongo1.startSession();
- const sessionDB1 = session1.getDatabase(dbName);
- session1.startTransaction({
- writeConcern: {w: "majority", wtimeout: successTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate1 = {update: collName, updates: [{q: {x: 1}, u: {$set: {x: 1}}}]};
- printjson(assert.commandWorked(sessionDB1.runCommand(fruitlessUpdate1)));
-
- jsTestLog("Prepared Abort Setup");
- const mongo2 = new Mongo(primary.host);
- const session2 = mongo2.startSession();
- const sessionDB2 = session2.getDatabase(dbName);
- session2.startTransaction({
- writeConcern: {w: "majority", wtimeout: failTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate2 = {update: collName, updates: [{q: {x: 2}, u: {$set: {x: 2}}}]};
- printjson(assert.commandWorked(sessionDB2.runCommand(fruitlessUpdate2)));
- PrepareHelpers.prepareTransaction(session2);
-
- jsTestLog("Prepare Setup");
- const mongo3 = new Mongo(primary.host);
- const session3 = mongo3.startSession();
- const sessionDB3 = session3.getDatabase(dbName);
- session3.startTransaction({
- writeConcern: {w: "majority", wtimeout: failTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate3 = {update: collName, updates: [{q: {x: 3}, u: {$set: {x: 3}}}]};
- printjson(assert.commandWorked(sessionDB3.runCommand(fruitlessUpdate3)));
-
- jsTestLog("Unprepared Commit Setup");
- const mongo4 = new Mongo(primary.host);
- const session4 = mongo4.startSession();
- const sessionDB4 = session4.getDatabase(dbName);
- session4.startTransaction({
- writeConcern: {w: "majority", wtimeout: failTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate4 = {update: collName, updates: [{q: {x: 4}, u: {$set: {x: 4}}}]};
- printjson(assert.commandWorked(sessionDB4.runCommand(fruitlessUpdate4)));
-
- jsTestLog("Prepared Commit Setup");
- const mongo5 = new Mongo(primary.host);
- const session5 = mongo5.startSession();
- const sessionDB5 = session5.getDatabase(dbName);
- session5.startTransaction({
- writeConcern: {w: "majority", wtimeout: failTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate5 = {update: collName, updates: [{q: {x: 5}, u: {$set: {x: 5}}}]};
- printjson(assert.commandWorked(sessionDB5.runCommand(fruitlessUpdate5)));
- let prepareTS5 = PrepareHelpers.prepareTransaction(session5);
-
- jsTestLog("Unprepared Abort On Used Connection Setup");
- const session6 = primary.getDB("admin").getMongo().startSession();
- const sessionDB6 = session6.getDatabase(dbName);
- session6.startTransaction({
- writeConcern: {w: "majority", wtimeout: failTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- const fruitlessUpdate6 = {update: collName, updates: [{q: {x: 6}, u: {$set: {x: 6}}}]};
- printjson(assert.commandWorked(sessionDB6.runCommand(fruitlessUpdate6)));
-
- jsTestLog("Stop replication");
- stopReplicationOnSecondaries(rst);
-
- jsTestLog("Advance OpTime on primary, with replication stopped");
-
- printjson(assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{}]})));
-
- jsTestLog("Run test commands, with replication stopped");
-
- jsTestLog("Unprepared Abort Test");
- assert.commandWorked(session1.abortTransaction_forTesting());
-
- jsTestLog("Prepared Abort Test");
- assert.commandFailedWithCode(session2.abortTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- jsTestLog("Prepare Test");
- assert.commandFailedWithCode(
- session3.getDatabase('admin').adminCommand(
- {prepareTransaction: 1, writeConcern: {w: "majority", wtimeout: failTimeoutMS}}),
- ErrorCodes.WriteConcernFailed);
- assert.commandFailedWithCode(session3.abortTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- jsTestLog("Unprepared Commit Test");
- assert.commandFailedWithCode(session4.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- jsTestLog("Prepared Commit Test");
- assert.commandFailedWithCode(session5.getDatabase('admin').adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTS5,
- writeConcern: {w: "majority", wtimeout: failTimeoutMS}
- }),
- ErrorCodes.WriteConcernFailed);
- // Send commit with the shell helper to reset the shell's state.
- assert.commandFailedWithCode(session5.commitTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- jsTestLog("Unprepared Abort On Used Connection Test");
- assert.commandFailedWithCode(session6.abortTransaction_forTesting(),
- ErrorCodes.WriteConcernFailed);
-
- jsTestLog("Restart replication");
- restartReplicationOnSecondaries(rst);
-
- jsTestLog("Try transaction with replication enabled");
-
- // Unprepared Abort.
- session1.startTransaction({
- writeConcern: {w: "majority", wtimeout: successTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- assert.commandWorked(sessionDB1.runCommand(fruitlessUpdate1));
- assert.commandWorked(session1.abortTransaction_forTesting());
-
- // Prepared Abort.
- session2.startTransaction({
- writeConcern: {w: "majority", wtimeout: successTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- assert.commandWorked(sessionDB2.runCommand(fruitlessUpdate2));
- PrepareHelpers.prepareTransaction(session2);
- assert.commandWorked(session2.abortTransaction_forTesting());
-
- // Testing prepare is no different than prepared abort or prepared commit.
-
- // Unprepared Commit.
- session4.startTransaction({
- writeConcern: {w: "majority", wtimeout: successTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- assert.commandWorked(sessionDB4.runCommand(fruitlessUpdate4));
- assert.commandWorked(session4.commitTransaction_forTesting());
-
- // Prepared Commit.
- session5.startTransaction({
- writeConcern: {w: "majority", wtimeout: successTimeoutMS},
- readConcern: {level: readConcernLevel}
- });
- assert.commandWorked(sessionDB5.runCommand(fruitlessUpdate5));
- prepareTS5 = PrepareHelpers.prepareTransaction(session5);
- assert.commandWorked(session5.getDatabase('admin').adminCommand({
- commitTransaction: 1,
- commitTimestamp: prepareTS5,
- writeConcern: {w: "majority", wtimeout: successTimeoutMS}
- }));
- // Send commit with the shell helper to reset the shell's state.
- assert.commandWorked(session5.commitTransaction_forTesting());
-
- // Unprepared abort already uses a "used connection" for this success test.
- }
-
- runTest("local");
- runTest("majority");
- runTest("snapshot");
-
- rst.stopSet();
+ assert.commandWorked(sessionDB5.runCommand(fruitlessUpdate5));
+ prepareTS5 = PrepareHelpers.prepareTransaction(session5);
+ assert.commandWorked(session5.getDatabase('admin').adminCommand({
+ commitTransaction: 1,
+ commitTimestamp: prepareTS5,
+ writeConcern: {w: "majority", wtimeout: successTimeoutMS}
+ }));
+ // Send commit with the shell helper to reset the shell's state.
+ assert.commandWorked(session5.commitTransaction_forTesting());
+
+ // Unprepared abort already uses a "used connection" for this success test.
+}
+
+runTest("local");
+runTest("majority");
+runTest("snapshot");
+
+rst.stopSet();
}());
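
The stop-replication technique at the heart of this test can be sketched on its own. This assumes `rst` is a set with at least one secondary and that write_concern_util.js is loaded; the helper names are the ones the test itself uses.

    load("jstests/libs/write_concern_util.js");

    stopReplicationOnSecondaries(rst);

    // With replication halted, a w:"majority" write cannot satisfy its write
    // concern and reports a wtimeout-based writeConcernError.
    const res = rst.getPrimary().getDB("test").runCommand(
        {insert: "coll", documents: [{}], writeConcern: {w: "majority", wtimeout: 1000}});
    assertWriteConcernError(res);

    restartReplicationOnSecondaries(rst);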
diff --git a/jstests/replsets/transient_txn_error_labels.js b/jstests/replsets/transient_txn_error_labels.js
index defea4c774b..0a886856beb 100644
--- a/jstests/replsets/transient_txn_error_labels.js
+++ b/jstests/replsets/transient_txn_error_labels.js
@@ -1,244 +1,240 @@
// Test TransientTransactionErrors error label in transactions.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- load("jstests/libs/write_concern_util.js");
- load("jstests/libs/parallelTester.js"); // For ScopedThread.
-
- const dbName = "test";
- const collName = "no_error_labels_outside_txn";
-
- // We are testing coordinateCommitTransaction, which requires the nodes to be started with
- // --shardsvr.
- const st = new ShardingTest(
- {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}});
- const primary = st.rs0.getPrimary();
- const secondary = st.rs0.getSecondary();
-
- const testDB = primary.getDB(dbName);
- const adminDB = testDB.getSiblingDB("admin");
- const testColl = testDB.getCollection(collName);
-
- const sessionOptions = {causalConsistency: false};
- let session = primary.startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
- let sessionColl = sessionDb.getCollection(collName);
- let secondarySession = secondary.startSession(sessionOptions);
- let secondarySessionDb = secondarySession.getDatabase(dbName);
-
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
-
- jsTest.log("Insert inside a transaction on secondary should fail but return error labels");
- let txnNumber = 0;
- let res = secondarySessionDb.runCommand({
- insert: collName,
- documents: [{_id: "insert-1"}],
- readConcern: {level: "snapshot"},
- txnNumber: NumberLong(txnNumber),
- startTransaction: true,
- autocommit: false
+"use strict";
+
+load("jstests/libs/write_concern_util.js");
+load("jstests/libs/parallelTester.js"); // For ScopedThread.
+
+const dbName = "test";
+const collName = "no_error_labels_outside_txn";
+
+// We are testing coordinateCommitTransaction, which requires the nodes to be started with
+// --shardsvr.
+const st = new ShardingTest(
+ {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}});
+const primary = st.rs0.getPrimary();
+const secondary = st.rs0.getSecondary();
+
+const testDB = primary.getDB(dbName);
+const adminDB = testDB.getSiblingDB("admin");
+const testColl = testDB.getCollection(collName);
+
+const sessionOptions = {
+ causalConsistency: false
+};
+let session = primary.startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+let sessionColl = sessionDb.getCollection(collName);
+let secondarySession = secondary.startSession(sessionOptions);
+let secondarySessionDb = secondarySession.getDatabase(dbName);
+
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+
+jsTest.log("Insert inside a transaction on secondary should fail but return error labels");
+let txnNumber = 0;
+let res = secondarySessionDb.runCommand({
+ insert: collName,
+ documents: [{_id: "insert-1"}],
+ readConcern: {level: "snapshot"},
+ txnNumber: NumberLong(txnNumber),
+ startTransaction: true,
+ autocommit: false
+});
+assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+
+jsTest.log("Insert outside a transaction on secondary should fail but not return error labels");
+txnNumber++;
+// Insert as a retryable write.
+res = secondarySessionDb.runCommand(
+ {insert: collName, documents: [{_id: "insert-1"}], txnNumber: NumberLong(txnNumber)});
+
+assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert(!res.hasOwnProperty("errorLabels"));
+secondarySession.endSession();
+
+jsTest.log("failCommand should be able to return errors with TransientTransactionError");
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.WriteConflict, failCommands: ["insert"]}
+}));
+session.startTransaction();
+jsTest.log("WriteCommandError should have error labels inside transactions.");
+res = sessionColl.insert({_id: "write-fail-point"});
+assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
+assert(res instanceof WriteCommandError);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+res = testColl.insert({_id: "write-fail-point-outside-txn"});
+jsTest.log("WriteCommandError should not have error labels outside transactions.");
+// WriteConflict will not be returned outside transactions in real cases, but it's fine for
+// testing purposes.
+assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
+assert(res instanceof WriteCommandError);
+assert(!res.hasOwnProperty("errorLabels"));
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTest.log("WriteConflict returned by commitTransaction command is TransientTransactionError");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "commitTransaction-fail-point"}));
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.WriteConflict, failCommands: ["commitTransaction"]}
+}));
+res = session.commitTransaction_forTesting();
+assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+
+jsTest.log("NotMaster returned by commitTransaction command is not TransientTransactionError");
+// commitTransaction will attempt to perform a noop write in response to a NoSuchTransaction
+// error and non-empty writeConcern. This will throw NotMaster.
+res = secondarySessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(secondarySession.getTxnNumber_forTesting() + 1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+});
+assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert(!res.hasOwnProperty("errorLabels"));
+
+jsTest.log(
+ "NotMaster returned by coordinateCommitTransaction command is not TransientTransactionError");
+// coordinateCommitTransaction will attempt to perform a noop write in response to a
+// NoSuchTransaction error and non-empty writeConcern. This will throw NotMaster.
+res = secondarySessionDb.adminCommand({
+ coordinateCommitTransaction: 1,
+ participants: [],
+ txnNumber: NumberLong(secondarySession.getTxnNumber_forTesting() + 1),
+ autocommit: false,
+ writeConcern: {w: "majority"}
+});
+assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
+assert(!res.hasOwnProperty("errorLabels"));
+
+jsTest.log("ShutdownInProgress returned by write commands is TransientTransactionError");
+session.startTransaction();
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.ShutdownInProgress, failCommands: ["insert"]}
+}));
+res = sessionColl.insert({_id: "commitTransaction-fail-point"});
+assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
+assert(res instanceof WriteCommandError);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+
+jsTest.log(
+ "ShutdownInProgress returned by commitTransaction command is not TransientTransactionError");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "commitTransaction-fail-point"}));
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.ShutdownInProgress, failCommands: ["commitTransaction"]}
+}));
+res = session.commitTransaction_forTesting();
+assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
+assert(!res.hasOwnProperty("errorLabels"));
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+
+jsTest.log(
+ "ShutdownInProgress returned by coordinateCommitTransaction command is not TransientTransactionError");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "coordinateCommitTransaction-fail-point"}));
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.ShutdownInProgress, failCommands: ["coordinateCommitTransaction"]}
+}));
+res = sessionDb.adminCommand({
+ coordinateCommitTransaction: 1,
+ participants: [],
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false
+});
+assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
+assert(!res.hasOwnProperty("errorLabels"));
+assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+
+jsTest.log("LockTimeout should be TransientTransactionError");
+// Start a transaction to hold the DBLock in IX mode so that drop will be blocked.
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "lock-timeout-1"}));
+function dropCmdFunc(primaryHost, dbName, collName) {
+ const primary = new Mongo(primaryHost);
+ return primary.getDB(dbName).runCommand({drop: collName, writeConcern: {w: "majority"}});
+}
+const thread = new ScopedThread(dropCmdFunc, primary.host, dbName, collName);
+thread.start();
+// Wait for the drop to have a pending MODE_X lock on the database.
+assert.soon(
+ function() {
+ return adminDB
+ .aggregate([
+ {$currentOp: {}},
+ {$match: {"command.drop": collName, waitingForLock: true}}
+ ])
+ .itcount() === 1;
+ },
+ function() {
+ return "Failed to find drop in currentOp output: " +
+ tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
});
- assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
-
- jsTest.log("Insert outside a transaction on secondary should fail but not return error labels");
- txnNumber++;
- // Insert as a retryable write.
- res = secondarySessionDb.runCommand(
- {insert: collName, documents: [{_id: "insert-1"}], txnNumber: NumberLong(txnNumber)});
-
- assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
- assert(!res.hasOwnProperty("errorLabels"));
- secondarySession.endSession();
-
- jsTest.log("failCommand should be able to return errors with TransientTransactionError");
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.WriteConflict, failCommands: ["insert"]}
- }));
- session.startTransaction();
- jsTest.log("WriteCommandError should have error labels inside transactions.");
- res = sessionColl.insert({_id: "write-fail-point"});
- assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
- assert(res instanceof WriteCommandError);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- res = testColl.insert({_id: "write-fail-point-outside-txn"});
- jsTest.log("WriteCommandError should not have error labels outside transactions.");
- // WriteConflict will not be returned outside transactions in real cases, but it's fine for
- // testing purpose.
- assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
- assert(res instanceof WriteCommandError);
- assert(!res.hasOwnProperty("errorLabels"));
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTest.log("WriteConflict returned by commitTransaction command is TransientTransactionError");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "commitTransaction-fail-point"}));
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.WriteConflict, failCommands: ["commitTransaction"]}
- }));
- res = session.commitTransaction_forTesting();
- assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- jsTest.log("NotMaster returned by commitTransaction command is not TransientTransactionError");
- // commitTransaction will attempt to perform a noop write in response to a NoSuchTransaction
- // error and non-empty writeConcern. This will throw NotMaster.
- res = secondarySessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(secondarySession.getTxnNumber_forTesting() + 1),
- autocommit: false,
- writeConcern: {w: "majority"}
- });
- assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
- assert(!res.hasOwnProperty("errorLabels"));
-
- jsTest.log(
- "NotMaster returned by coordinateCommitTransaction command is not TransientTransactionError");
- // coordinateCommitTransaction will attempt to perform a noop write in response to a
- // NoSuchTransaction error and non-empty writeConcern. This will throw NotMaster.
- res = secondarySessionDb.adminCommand({
- coordinateCommitTransaction: 1,
- participants: [],
- txnNumber: NumberLong(secondarySession.getTxnNumber_forTesting() + 1),
- autocommit: false,
- writeConcern: {w: "majority"}
- });
- assert.commandFailedWithCode(res, ErrorCodes.NotMaster);
- assert(!res.hasOwnProperty("errorLabels"));
-
- jsTest.log("ShutdownInProgress returned by write commands is TransientTransactionError");
- session.startTransaction();
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.ShutdownInProgress, failCommands: ["insert"]}
- }));
- res = sessionColl.insert({_id: "commitTransaction-fail-point"});
- assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
- assert(res instanceof WriteCommandError);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- jsTest.log(
- "ShutdownInProgress returned by commitTransaction command is not TransientTransactionError");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "commitTransaction-fail-point"}));
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.ShutdownInProgress, failCommands: ["commitTransaction"]}
- }));
- res = session.commitTransaction_forTesting();
- assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
- assert(!res.hasOwnProperty("errorLabels"));
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- jsTest.log(
- "ShutdownInProgress returned by coordinateCommitTransaction command is not TransientTransactionError");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "coordinateCommitTransaction-fail-point"}));
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {
- errorCode: ErrorCodes.ShutdownInProgress,
- failCommands: ["coordinateCommitTransaction"]
- }
- }));
- res = sessionDb.adminCommand({
- coordinateCommitTransaction: 1,
- participants: [],
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false
- });
- assert.commandFailedWithCode(res, ErrorCodes.ShutdownInProgress);
- assert(!res.hasOwnProperty("errorLabels"));
- assert.commandWorked(session.abortTransaction_forTesting());
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- jsTest.log("LockTimeout should be TransientTransactionError");
- // Start a transaction to hold the DBLock in IX mode so that drop will be blocked.
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "lock-timeout-1"}));
- function dropCmdFunc(primaryHost, dbName, collName) {
- const primary = new Mongo(primaryHost);
- return primary.getDB(dbName).runCommand({drop: collName, writeConcern: {w: "majority"}});
- }
- const thread = new ScopedThread(dropCmdFunc, primary.host, dbName, collName);
- thread.start();
- // Wait for the drop to have a pending MODE_X lock on the database.
- assert.soon(
- function() {
- return adminDB
- .aggregate([
- {$currentOp: {}},
- {$match: {"command.drop": collName, waitingForLock: true}}
- ])
- .itcount() === 1;
- },
- function() {
- return "Failed to find drop in currentOp output: " +
- tojson(adminDB.aggregate([{$currentOp: {}}]).toArray());
- });
- // Start another transaction in a new session, which cannot acquire the database lock in time.
- let sessionOther = primary.startSession(sessionOptions);
- sessionOther.startTransaction();
- res = sessionOther.getDatabase(dbName).getCollection(collName).insert({_id: "lock-timeout-2"});
- assert.commandFailedWithCode(res, ErrorCodes.LockTimeout);
- assert(res instanceof WriteCommandError);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandFailedWithCode(sessionOther.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(session.abortTransaction_forTesting());
- thread.join();
- assert.commandWorked(thread.returnData());
-
- // Re-create the collection for later test cases.
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
-
- jsTest.log("Network errors for in-progress statements should be transient");
- session.startTransaction();
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.HostUnreachable, failCommands: ["aggregate"]}
- }));
- res = sessionDb.runCommand({aggregate: collName, pipeline: [{$match: {}}], cursor: {}});
- assert.commandFailedWithCode(res, ErrorCodes.HostUnreachable);
- assert.eq(res.errorLabels, ["TransientTransactionError"]);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- jsTest.log("Network errors for commit should not be transient");
- session.startTransaction();
- assert.commandWorked(sessionColl.insert({_id: "commitTransaction-network-error"}));
- assert.commandWorked(testDB.adminCommand({
- configureFailPoint: "failCommand",
- mode: "alwaysOn",
- data: {errorCode: ErrorCodes.HostUnreachable, failCommands: ["commitTransaction"]}
- }));
- res = sessionDb.adminCommand({
- commitTransaction: 1,
- txnNumber: NumberLong(session.getTxnNumber_forTesting()),
- autocommit: false
- });
- assert.commandFailedWithCode(res, ErrorCodes.HostUnreachable);
- assert(!res.hasOwnProperty("errorLabels"), tojson(res));
- assert.commandWorked(session.abortTransaction_forTesting());
- assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
-
- session.endSession();
-
- st.stop();
+// Start another transaction in a new session, which cannot acquire the database lock in time.
+let sessionOther = primary.startSession(sessionOptions);
+sessionOther.startTransaction();
+res = sessionOther.getDatabase(dbName).getCollection(collName).insert({_id: "lock-timeout-2"});
+assert.commandFailedWithCode(res, ErrorCodes.LockTimeout);
+assert(res instanceof WriteCommandError);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+assert.commandFailedWithCode(sessionOther.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+assert.commandWorked(session.abortTransaction_forTesting());
+thread.join();
+assert.commandWorked(thread.returnData());
+
+// Re-create the collection for later test cases.
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+
+jsTest.log("Network errors for in-progress statements should be transient");
+session.startTransaction();
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.HostUnreachable, failCommands: ["aggregate"]}
+}));
+res = sessionDb.runCommand({aggregate: collName, pipeline: [{$match: {}}], cursor: {}});
+assert.commandFailedWithCode(res, ErrorCodes.HostUnreachable);
+assert.eq(res.errorLabels, ["TransientTransactionError"]);
+assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction);
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+
+jsTest.log("Network errors for commit should not be transient");
+session.startTransaction();
+assert.commandWorked(sessionColl.insert({_id: "commitTransaction-network-error"}));
+assert.commandWorked(testDB.adminCommand({
+ configureFailPoint: "failCommand",
+ mode: "alwaysOn",
+ data: {errorCode: ErrorCodes.HostUnreachable, failCommands: ["commitTransaction"]}
+}));
+res = sessionDb.adminCommand({
+ commitTransaction: 1,
+ txnNumber: NumberLong(session.getTxnNumber_forTesting()),
+ autocommit: false
+});
+assert.commandFailedWithCode(res, ErrorCodes.HostUnreachable);
+assert(!res.hasOwnProperty("errorLabels"), tojson(res));
+assert.commandWorked(session.abortTransaction_forTesting());
+assert.commandWorked(testDB.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
+
+session.endSession();
+
+st.stop();
}());
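
The hunks above only re-indent transient_txn_error_labels.js, but the pattern the file exercises is easy to lose in the reformatting noise: inject an error with the failCommand failpoint, then check whether the server attached the TransientTransactionError label. A minimal standalone sketch of that pattern follows; it is illustrative only, with `conn` and the database/collection names as placeholders, while the failpoint fields and assertion helpers are the same ones used throughout this diff:

    // Sketch: inject a retryable error inside a transaction and assert on the
    // error label. Assumes `conn` is a connection to a replica-set primary.
    const session = conn.startSession({causalConsistency: false});
    const sessionDB = session.getDatabase("test");
    assert.commandWorked(conn.getDB("admin").runCommand({
        configureFailPoint: "failCommand",
        mode: {times: 1},
        data: {errorCode: ErrorCodes.WriteConflict, failCommands: ["insert"]}
    }));
    session.startTransaction();
    const res = sessionDB.getCollection("coll").insert({_id: 1});
    assert.commandFailedWithCode(res, ErrorCodes.WriteConflict);
    // Inside a transaction the server labels WriteConflict as transient, so a
    // driver may retry the whole transaction from the top.
    assert.eq(res.errorLabels, ["TransientTransactionError"]);
    // The failed statement implicitly aborted the transaction server-side.
    assert.commandFailedWithCode(session.abortTransaction_forTesting(),
                                 ErrorCodes.NoSuchTransaction);
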
diff --git a/jstests/replsets/transient_txn_error_labels_with_write_concern.js b/jstests/replsets/transient_txn_error_labels_with_write_concern.js
index b422bb96ccc..54ad4f9044a 100644
--- a/jstests/replsets/transient_txn_error_labels_with_write_concern.js
+++ b/jstests/replsets/transient_txn_error_labels_with_write_concern.js
@@ -1,131 +1,135 @@
// Test TransientTransactionError error label for commands in transactions with write concern.
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- load("jstests/libs/check_log.js");
- load("jstests/libs/write_concern_util.js");
- load("jstests/replsets/rslib.js");
-
- const dbName = "test";
- const collName = "transient_txn_error_labels_with_write_concern";
-
- // We are testing coordinateCommitTransaction, which requires the nodes to be started with
- // --shardsvr.
- const st = new ShardingTest(
- {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}});
- const rst = st.rs0;
-
- const primary = rst.getPrimary();
- const secondary = rst.getSecondary();
- assert.eq(primary, rst.nodes[0]);
- const testDB = primary.getDB(dbName);
-
- const sessionOptions = {causalConsistency: false};
- const writeConcernMajority = {w: "majority", wtimeout: 500};
-
- assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
-
- jsTest.log("Write concern errors should not have error labels");
- // Start a new session on the primary.
- let session = primary.startSession(sessionOptions);
- let sessionDb = session.getDatabase(dbName);
- let sessionColl = sessionDb.getCollection(collName);
+"use strict";
+
+load("jstests/libs/check_log.js");
+load("jstests/libs/write_concern_util.js");
+load("jstests/replsets/rslib.js");
+
+const dbName = "test";
+const collName = "transient_txn_error_labels_with_write_concern";
+
+// We are testing coordinateCommitTransaction, which requires the nodes to be started with
+// --shardsvr.
+const st = new ShardingTest(
+ {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}});
+const rst = st.rs0;
+
+const primary = rst.getPrimary();
+const secondary = rst.getSecondary();
+assert.eq(primary, rst.nodes[0]);
+const testDB = primary.getDB(dbName);
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const writeConcernMajority = {
+ w: "majority",
+ wtimeout: 500
+};
+
+assert.commandWorked(testDB.createCollection(collName, {writeConcern: {w: "majority"}}));
+
+jsTest.log("Write concern errors should not have error labels");
+// Start a new session on the primary.
+let session = primary.startSession(sessionOptions);
+let sessionDb = session.getDatabase(dbName);
+let sessionColl = sessionDb.getCollection(collName);
+stopServerReplication(rst.getSecondaries());
+session.startTransaction({writeConcern: writeConcernMajority});
+assert.commandWorked(sessionColl.insert({_id: "write-with-write-concern"}));
+let res = session.commitTransaction_forTesting();
+checkWriteConcernTimedOut(res);
+assert(!res.hasOwnProperty("code"));
+assert(!res.hasOwnProperty("errorLabels"));
+restartServerReplication(rst.getSecondaries());
+
+function runNoSuchTransactionTests(cmd, cmdName) {
+ jsTest.log("Running NoSuchTransaction tests for " + cmdName);
+ assert.commandWorked(primary.adminCommand({clearLog: "global"}));
+
+ jsTest.log(cmdName + " should wait for write concern even if it returns NoSuchTransaction");
+ rst.awaitReplication();
stopServerReplication(rst.getSecondaries());
- session.startTransaction({writeConcern: writeConcernMajority});
- assert.commandWorked(sessionColl.insert({_id: "write-with-write-concern"}));
- let res = session.commitTransaction_forTesting();
+ // Use a txnNumber that is one higher than the server has tracked.
+ res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
+ txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
+ autocommit: false,
+ writeConcern: writeConcernMajority
+ }));
checkWriteConcernTimedOut(res);
- assert(!res.hasOwnProperty("code"));
+ assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+
+ jsTest.log("NoSuchTransaction with write concern error is not transient");
assert(!res.hasOwnProperty("errorLabels"));
- restartServerReplication(rst.getSecondaries());
- function runNoSuchTransactionTests(cmd, cmdName) {
- jsTest.log("Running NoSuchTransaction tests for " + cmdName);
- assert.commandWorked(primary.adminCommand({clearLog: "global"}));
-
- jsTest.log(cmdName + " should wait for write concern even if it returns NoSuchTransaction");
- rst.awaitReplication();
- stopServerReplication(rst.getSecondaries());
- // Use a txnNumber that is one higher than the server has tracked.
- res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
- txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
- autocommit: false,
- writeConcern: writeConcernMajority
- }));
- checkWriteConcernTimedOut(res);
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
-
- jsTest.log("NoSuchTransaction with write concern error is not transient");
- assert(!res.hasOwnProperty("errorLabels"));
-
- jsTest.log("NoSuchTransaction without write concern error is transient");
- restartServerReplication(rst.getSecondaries());
- // Use a txnNumber that is one higher than the server has tracked.
- res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
- txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
- autocommit: false,
- writeConcern: {w: "majority"} // Wait with a long timeout.
- }));
- assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
- assert(!res.hasOwnProperty("writeConcernError"), res);
- assert.eq(res["errorLabels"], ["TransientTransactionError"], res);
-
- jsTest.log(
- "If the noop write for NoSuchTransaction cannot occur, the error is not transient");
-
- // Lock 'local' database in X mode.
- let lockShell = startParallelShell(function() {
- assert.commandFailed(db.adminCommand({
- sleep: 1,
- secs: 500,
- lock: "w",
- lockTarget: "local",
- $comment: "transient_txn_error_labels_with_write_concern lock sleep"
- }));
- }, rst.ports[0]);
-
- // Wait for sleep to appear in currentOp
- let opId = -1;
- assert.soon(function() {
- const curopRes = testDB.currentOp();
- assert.commandWorked(curopRes);
- const foundOp = curopRes["inprog"].filter(
- op => (op["ns"] == "admin.$cmd" &&
- op["command"]["$comment"] ==
- "transient_txn_error_labels_with_write_concern lock sleep"));
- if (foundOp.length == 1) {
- opId = foundOp[0]["opid"];
- }
- return (foundOp.length == 1);
- });
-
- // The server will attempt to perform a noop write, since the command returns
- // NoSuchTransaction. The noop write will time out acquiring a lock on the 'local' database.
- // This should not be a TransientTransactionError, since the server has not successfully
- // replicated a write to confirm that it is primary.
- // Use a txnNumber that is one higher than the server has tracked.
- res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
- txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
- autocommit: false,
- writeConcern: writeConcernMajority,
- maxTimeMS: 1000
+ jsTest.log("NoSuchTransaction without write concern error is transient");
+ restartServerReplication(rst.getSecondaries());
+ // Use a txnNumber that is one higher than the server has tracked.
+ res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
+ txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
+ autocommit: false,
+ writeConcern: {w: "majority"} // Wait with a long timeout.
+ }));
+ assert.commandFailedWithCode(res, ErrorCodes.NoSuchTransaction);
+ assert(!res.hasOwnProperty("writeConcernError"), res);
+ assert.eq(res["errorLabels"], ["TransientTransactionError"], res);
+
+ jsTest.log("If the noop write for NoSuchTransaction cannot occur, the error is not transient");
+
+ // Lock 'local' database in X mode.
+ let lockShell = startParallelShell(function() {
+ assert.commandFailed(db.adminCommand({
+ sleep: 1,
+ secs: 500,
+ lock: "w",
+ lockTarget: "local",
+ $comment: "transient_txn_error_labels_with_write_concern lock sleep"
}));
- assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
- assert(!res.hasOwnProperty("errorLabels"));
+ }, rst.ports[0]);
+
+ // Wait for sleep to appear in currentOp
+ let opId = -1;
+ assert.soon(function() {
+ const curopRes = testDB.currentOp();
+ assert.commandWorked(curopRes);
+ const foundOp = curopRes["inprog"].filter(
+ op => (op["ns"] == "admin.$cmd" &&
+ op["command"]["$comment"] ==
+ "transient_txn_error_labels_with_write_concern lock sleep"));
+ if (foundOp.length == 1) {
+ opId = foundOp[0]["opid"];
+ }
+ return (foundOp.length == 1);
+ });
+
+ // The server will attempt to perform a noop write, since the command returns
+ // NoSuchTransaction. The noop write will time out acquiring a lock on the 'local' database.
+ // This should not be a TransientTransactionError, since the server has not successfully
+ // replicated a write to confirm that it is primary.
+ // Use a txnNumber that is one higher than the server has tracked.
+ res = sessionDb.adminCommand(Object.assign(Object.assign({}, cmd), {
+ txnNumber: NumberLong(session.getTxnNumber_forTesting() + 1),
+ autocommit: false,
+ writeConcern: writeConcernMajority,
+ maxTimeMS: 1000
+ }));
+ assert.commandFailedWithCode(res, ErrorCodes.MaxTimeMSExpired);
+ assert(!res.hasOwnProperty("errorLabels"));
- assert.commandWorked(testDB.killOp(opId));
- lockShell();
+ assert.commandWorked(testDB.killOp(opId));
+ lockShell();
- rst.awaitReplication();
- }
+ rst.awaitReplication();
+}
- runNoSuchTransactionTests({commitTransaction: 1}, "commitTransaction");
+runNoSuchTransactionTests({commitTransaction: 1}, "commitTransaction");
- runNoSuchTransactionTests({coordinateCommitTransaction: 1, participants: []},
- "coordinateCommitTransaction");
+runNoSuchTransactionTests({coordinateCommitTransaction: 1, participants: []},
+ "coordinateCommitTransaction");
- session.endSession();
+session.endSession();
- st.stop();
+st.stop();
}());
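
One detail of the rewritten runNoSuchTransactionTests helper above is worth spelling out: it layers the per-call transaction fields onto a caller-supplied base command with nested `Object.assign` calls, so the base object is never mutated between invocations. A hedged sketch of that merge, where the helper name `withTxnFields` is invented purely for illustration:

    // Illustrative helper, not part of the diff: copy the base command, then
    // overlay the session/transaction fields the tests vary per call.
    function withTxnFields(cmd, txnNumber, writeConcern) {
        return Object.assign(Object.assign({}, cmd), {
            txnNumber: NumberLong(txnNumber),
            autocommit: false,
            writeConcern: writeConcern
        });
    }
    // e.g. withTxnFields({commitTransaction: 1}, 5, {w: "majority", wtimeout: 500})
    // produces the same shape the tests above send through sessionDb.adminCommand().
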
diff --git a/jstests/replsets/two_nodes_priority_take_over.js b/jstests/replsets/two_nodes_priority_take_over.js
index 897a930156f..1fba2350e8f 100644
--- a/jstests/replsets/two_nodes_priority_take_over.js
+++ b/jstests/replsets/two_nodes_priority_take_over.js
@@ -8,7 +8,6 @@ if (false) {
load("jstests/replsets/rslib.js");
(function() {
-
"use strict";
var name = "two_nodes_priority_take_over";
var rst = new ReplSetTest({name: name, nodes: 2});
@@ -55,6 +54,5 @@ if (false) {
// no current candidate. If vote requests failed (wrongly) for some reason,
// nodes have to start new elections, which increase the term unnecessarily.
assert.eq(newTerm, stableTerm + 1);
-
})();
}
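
The next (and largest) file in this diff, txn_override_unittests.js, is built around monkey-patching `Mongo.prototype.runCommand`. Stripped of the test plumbing, the interception pattern looks roughly like the sketch below, where `mockedResponses` is a simplified stand-in for the file's `cmdResponseOverrides` map:

    // Simplified sketch of the interception pattern: keep a handle to the
    // original method, short-circuit commands that have a mocked response,
    // and fall through to the server for everything else.
    let mockedResponses = {};  // {<cmdName>: <response object>}
    const originalRunCommand = Mongo.prototype.runCommand;
    Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
        const cmdName = Object.keys(cmdObj)[0];
        if (mockedResponses.hasOwnProperty(cmdName)) {
            const response = mockedResponses[cmdName];
            delete mockedResponses[cmdName];  // mocks are single-use, as above
            return response;                  // never reaches the server
        }
        return originalRunCommand.apply(this, arguments);
    };
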
diff --git a/jstests/replsets/txn_override_unittests.js b/jstests/replsets/txn_override_unittests.js
index 38187c4ff92..f508afe2a99 100644
--- a/jstests/replsets/txn_override_unittests.js
+++ b/jstests/replsets/txn_override_unittests.js
@@ -28,1896 +28,1894 @@
* @tags: [requires_replication, uses_transactions]
*/
(function() {
- "use strict";
- load("jstests/libs/transactions_util.js");
- load('jstests/libs/write_concern_util.js');
-
- // Commands not to override since they can log excessively.
- const runCommandOverrideBlacklistedCommands =
- ["getCmdLineOpts", "serverStatus", "configureFailPoint"];
-
- // cmdResponseOverrides is a map from commands to responses that should be provided in lieu of
- // running the command on the server. This is mostly used for returning WriteConcernErrors
- // without running the command or returning WriteConcernErrors with top level errors.
- // {<cmdName>: {responseObj: <response object>}}
- let cmdResponseOverrides = {};
-
- // postCommandFuncs is a map from commands to functions that should be run after either mocking
- // out their response or running them on the server. This is used to inject functionality at
- // times when the test is not given control, such as when the override runs extra commands on
- // retries.
- // {<cmdName>: {func}}
- let postCommandFuncs = {};
-
- /**
- * Deletes the command override from the given command.
- */
- function clearCommandOverride(cmdName) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
-
- delete cmdResponseOverrides[cmdName];
- }
+"use strict";
+load("jstests/libs/transactions_util.js");
+load('jstests/libs/write_concern_util.js');
+
+// Commands not to override since they can log excessively.
+const runCommandOverrideBlacklistedCommands =
+ ["getCmdLineOpts", "serverStatus", "configureFailPoint"];
+
+// cmdResponseOverrides is a map from commands to responses that should be provided in lieu of
+// running the command on the server. This is mostly used for returning WriteConcernErrors
+// without running the command or returning WriteConcernErrors with top level errors.
+// {<cmdName>: {responseObj: <response object>}}
+let cmdResponseOverrides = {};
+
+// postCommandFuncs is a map from commands to functions that should be run after either mocking
+// out their response or running them on the server. This is used to inject functionality at
+// times when the test is not given control, such as when the override runs extra commands on
+// retries.
+// {<cmdName>: {func}}
+let postCommandFuncs = {};
+
+/**
+ * Deletes the command override from the given command.
+ */
+function clearCommandOverride(cmdName) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ delete cmdResponseOverrides[cmdName];
+}
+
+/**
+ * Deletes the post-command function for the given command.
+ */
+function clearPostCommandFunc(cmdName) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ delete postCommandFuncs[cmdName];
+}
+
+/**
+ * Clears all command overrides and post-command functions.
+ */
+function clearAllCommandOverrides() {
+ cmdResponseOverrides = {};
+ postCommandFuncs = {};
+}
+
+/**
+ * Sets the provided function as the post-command function for the given command.
+ */
+function attachPostCmdFunction(cmdName, func) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ postCommandFuncs[cmdName] = func;
+}
+
+/**
+ * Sets that the given command should return the given response. The command will not actually
+ * be run.
+ */
+function setCommandMockResponse(cmdName, mockResponse) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ cmdResponseOverrides[cmdName] = {responseObj: mockResponse};
+}
+
+/**
+ * Sets that the given command should fail with ok:1 and the given write concern error.
+ * The command will not actually be run.
+ */
+function failCommandWithWCENoRun(cmdName, writeConcernErrorCode, writeConcernErrorCodeName) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ cmdResponseOverrides[cmdName] = {
+ responseObj: {
+ ok: 1,
+ writeConcernError: {code: writeConcernErrorCode, codeName: writeConcernErrorCodeName}
+ }
+ };
+}
- /**
- * Deletes the post-command function for the given command.
- */
- function clearPostCommandFunc(cmdName) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+/**
+ * Sets that the given command should fail with the given error and the given write concern
+ * error. The command will not actually be run.
+ */
+function failCommandWithErrorAndWCENoRun(
+ cmdName, errorCode, errorCodeName, writeConcernErrorCode, writeConcernErrorCodeName) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ cmdResponseOverrides[cmdName] = {
+ responseObj: {
+ ok: 0,
+ code: errorCode,
+ codeName: errorCodeName,
+ writeConcernError: {code: writeConcernErrorCode, codeName: writeConcernErrorCodeName}
+ }
+ };
+}
- delete postCommandFuncs[cmdName];
+/**
+ * Run the post-command function for the given command, if one has been set, and clear it once
+ * used.
+ */
+function runPostCommandFunc(cmdName) {
+ assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+
+ if (postCommandFuncs[cmdName]) {
+ jsTestLog("Running post-command function for " + cmdName);
+ try {
+ postCommandFuncs[cmdName]();
+ } finally {
+ clearPostCommandFunc(cmdName);
+ }
}
+}
- /**
- * Clears all command overrides and post-command functions.
- */
- function clearAllCommandOverrides() {
- cmdResponseOverrides = {};
- postCommandFuncs = {};
+/**
+ * Overrides 'runCommand' to provide a specific pre-set response to the given command. If the
+ * command is in the blacklist, it is not overridden. Otherwise, if a command response has been
+ * specified, returns that without running the function. If a post-command function is specified
+ * for the command, runs that after the command is run. The post-command function is run
+ * regardless of whether the command response was overridden or not.
+ */
+const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
+ const cmdName = Object.keys(cmdObj)[0];
+ if (runCommandOverrideBlacklistedCommands.includes(cmdName)) {
+ return mongoRunCommandOriginal.apply(this, arguments);
}
- /**
- * Sets the provided function as the post-command function for the given command.
- */
- function attachPostCmdFunction(cmdName, func) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+ if (cmdResponseOverrides.hasOwnProperty(cmdName)) {
+ const cmdResponse = cmdResponseOverrides[cmdName];
+ // Overrides are single-use.
+ clearCommandOverride(cmdName);
+ assert(cmdResponse);
+
+ jsTestLog("Unittest returning: " + tojsononeline(cmdResponse.responseObj) +
+ ", running: " + tojsononeline(cmdObj));
+ assert(cmdResponse.responseObj);
+ assert(cmdResponse.responseObj.ok === 1 || cmdResponse.responseObj.ok === 0);
- postCommandFuncs[cmdName] = func;
+ runPostCommandFunc(cmdName);
+ return cmdResponse.responseObj;
}
- /**
- * Sets that the given command should return the given response. The command will not actually
- * be run.
- */
- function setCommandMockResponse(cmdName, mockResponse) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+ const res = mongoRunCommandOriginal.apply(this, arguments);
+ print("Unittest received: " + tojsononeline(res) + ", running: " + tojsononeline(cmdObj));
+ runPostCommandFunc(cmdName);
+ return res;
+};
+
+const dbName = "txn_override_unittests";
+const collName1 = "test_coll1";
+const collName2 = "test_coll2";
+
+const rst = new ReplSetTest({nodes: 1});
+rst.startSet();
+rst.initiate();
+const conn = rst.getPrimary();
- cmdResponseOverrides[cmdName] = {responseObj: mockResponse};
+// We have a separate connection for the failpoint so that it does not break up the transaction
+// buffered in network_error_and_txn_override.js.
+const failpointConn = new Mongo(conn.host);
+
+/**
+ * Marks that the given command should fail with the given parameters using the failCommand
+ * failpoint. This does not break up a currently active transaction in the override function.
+ * This does override previous uses of the failpoint, however.
+ */
+function failCommandWithFailPoint(commandsToFail, {
+ errorCode: errorCode,
+ closeConnection: closeConnection = false,
+ writeConcernError: writeConcernError,
+ // By default only fail the next request of the given command.
+ mode: mode = {
+ times: 1
+ },
+} = {}) {
+ // The fail point will ignore the WCE if an error code is specified.
+ assert(!(writeConcernError && errorCode),
+ "Cannot specify both a WCE " + tojsononeline(writeConcernError) + " and an error code " +
+ errorCode);
+
+ let data = {
+ failCommands: commandsToFail,
+ };
+
+ if (errorCode) {
+ data["errorCode"] = errorCode;
}
- /**
- * Sets that the given command should fail with ok:1 and the given write concern error.
- * The command will not actually be run.
- */
- function failCommandWithWCENoRun(cmdName, writeConcernErrorCode, writeConcernErrorCodeName) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+ if (closeConnection) {
+ data["closeConnection"] = closeConnection;
+ }
- cmdResponseOverrides[cmdName] = {
- responseObj: {
- ok: 1,
- writeConcernError:
- {code: writeConcernErrorCode, codeName: writeConcernErrorCodeName}
- }
- };
+ if (writeConcernError) {
+ data["writeConcernError"] = writeConcernError;
}
- /**
- * Sets that the given command should fail with the given error and the given write concern
- * error. The command will not actually be run.
- */
- function failCommandWithErrorAndWCENoRun(
- cmdName, errorCode, errorCodeName, writeConcernErrorCode, writeConcernErrorCodeName) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
+ assert.commandWorked(mongoRunCommandOriginal.apply(
+ failpointConn, ['admin', {configureFailPoint: "failCommand", mode: mode, data: data}, 0]));
+}
- cmdResponseOverrides[cmdName] = {
- responseObj: {
- ok: 0,
- code: errorCode,
- codeName: errorCodeName,
+/**
+ * Turns off the failCommand failpoint completely.
+ */
+function stopFailingCommands() {
+ assert.commandWorked(mongoRunCommandOriginal.apply(
+ failpointConn, ['admin', {configureFailPoint: "failCommand", mode: "off"}, 0]));
+}
+
+/**
+ * Run a 'ping' command that is not allowed in a transaction. This has no effect, but causes
+ * network_error_and_txn_override.js to commit the current transaction in order to run the
+ * 'ping'.
+ */
+function endCurrentTransactionIfOpen() {
+ print("=-=-=-= Ending current transaction if open");
+ assert.commandWorked(testDB.runCommand({ping: 1}));
+}
+
+/**
+ * Aborts the current transaction in network_error_and_txn_override.js.
+ */
+function abortCurrentTransaction() {
+ const session = testDB.getSession();
+ const lsid = session.getSessionId();
+ const txnNum = TestData.currentTxnOverrideTxnNumber;
+ print("=-=-=-= Aborting current transaction " + txnNum + " on " + tojsononeline(lsid));
+
+ assert.commandWorked(mongoRunCommandOriginal.apply(
+ testDB.getMongo(),
+ ['admin', {abortTransaction: 1, autocommit: false, lsid: lsid, txnNumber: txnNum}, 0]));
+}
+
+/**
+ * Runs a test where a transaction attempts to use a forbidden database name. When running a
+ * CRUD operation on one of these databases, network_error_and_txn_override.js is expected to
+ * commit the current transaction and run the CRUD operation outside of a transaction.
+ */
+function testBadDBName(session, badDBName) {
+ const badDB = session.getDatabase(badDBName);
+ const badColl = badDB['foo'];
+ assert.commandWorked(badDB.createCollection(collName1));
+
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+
+ assert.commandWorked(badColl.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(badColl.find().itcount(), 1);
+
+ // We attempt another insert in the 'bad collection' that gets a 'DuplicateKey' error.
+ // 'DuplicateKey' errors cause transactions to abort, so if this error were received in a
+ // transaction, we would expect the transaction to get aborted and the collections to be
+ // empty. Since this is not running in a transaction, even though the statement fails, the
+ // previous inserts do not storage-rollback.
+ assert.commandFailedWithCode(badColl.insert({_id: 1}), ErrorCodes.DuplicateKey);
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(badColl.find().itcount(), 1);
+}
+
+/**
+ * Runs a specific test case, resetting test state before and after.
+ */
+function runTest(testSuite, testCase) {
+ // Drop with majority write concern to ensure transactions in subsequent test cases can
+ // immediately take locks on either collection.
+ coll1.drop({writeConcern: {w: "majority"}});
+ coll2.drop({writeConcern: {w: "majority"}});
+
+ // Ensure all overrides and failpoints have been turned off before running the test.
+ clearAllCommandOverrides();
+ stopFailingCommands();
+
+ jsTestLog(testSuite + ": Testing " + testCase.name);
+ testCase.test();
+
+ // End the current transaction if the test did not end it itself.
+ endCurrentTransactionIfOpen();
+ jsTestLog(testSuite + ": Test " + testCase.name + " complete.");
+
+ // Ensure all overrides and failpoints have been turned off after running the test as well.
+ clearAllCommandOverrides();
+ stopFailingCommands();
+}
+
+const retryOnNetworkErrorTests = [
+ {
+ name: "update with network error after success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ attachPostCmdFunction("update", function() {
+ throw new Error("SocketException");
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "ordinary CRUD ops",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(testDB.runCommand({insert: collName1, documents: [{_id: 2}]}));
+ assert.eq(coll1.find().itcount(), 2);
+ }
+ },
+ {
+ name: "retry on NotMaster",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "retry on NotMaster ordered",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandFailed(
+ testDB.runCommand({insert: collName1, documents: [{_id: 2}], ordered: true}));
+ }
+ },
+ {
+ name: "retry on NotMaster with object change",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ let obj1 = {_id: 1, x: 5};
+ let obj2 = {_id: 2, x: 5};
+ assert.commandWorked(coll1.insert(obj1));
+ assert.commandWorked(coll1.insert(obj2));
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 5}]);
+ obj1.x = 7;
+ assert.commandWorked(coll1.update({_id: 2}, {$set: {x: 8}}));
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
+ }
+ },
+ {
+ name: "implicit collection creation with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(
+ ["insert"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal stepdown error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun(
+ "insert", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal ordinary error",
+ test: function() {
+ failCommandWithErrorAndWCENoRun("insert",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with ordinary error",
+ test: function() {
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.OperationFailed});
+ assert.commandFailed(coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["insert"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError no success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithWCENoRun("insert", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "update with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with ordinary error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ }
+ },
+ {
+ name: "update with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with two stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"],
+ {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "update with chained stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ // Chain multiple update errors together.
+ attachPostCmdFunction("update", function() {
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "commands not run in transactions",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.commandFailedWithCode(coll1.insert({_id: 1}), ErrorCodes.DuplicateKey);
+
+ // If this were run in a transaction, the original insert and the duplicate one would
+ // both be storage-rolled-back and the count would be 0. We test that the count is 1
+ // to prove that the inserts are not in a transaction.
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "transaction commands not retried on retryable code",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ session.startTransaction();
+ assert.commandFailedWithCode(
+ testDB.runCommand({update: collName1, updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}]}),
+ ErrorCodes.NotMaster);
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ }
+ },
+ {
+ name: "transaction commands not retried on network error",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {closeConnection: true});
+
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ session.startTransaction();
+ const error = assert.throws(() => {
+ return testDB.runCommand(
+ {update: collName1, updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}]});
+ });
+ assert(isNetworkError(error), tojson(error));
+ assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ }
+ },
+ {
+ name: "commitTransaction retried on retryable code",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ }
+ },
+ {
+ name: "commitTransaction retried on write concern error",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {
writeConcernError:
- {code: writeConcernErrorCode, codeName: writeConcernErrorCodeName}
- }
- };
- }
+ {code: ErrorCodes.PrimarySteppedDown, codeName: "PrimarySteppedDown"}
+ });
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ const res = assert.commandWorked(session.commitTransaction_forTesting());
+ assert(!res.hasOwnProperty("writeConcernError"));
- /**
- * Run the post-command function for the given command, if one has been set, and clear it once
- * used.
- */
- function runPostCommandFunc(cmdName) {
- assert(!runCommandOverrideBlacklistedCommands.includes(cmdName));
-
- if (postCommandFuncs[cmdName]) {
- jsTestLog("Running post-command function for " + cmdName);
- try {
- postCommandFuncs[cmdName]();
- } finally {
- clearPostCommandFunc(cmdName);
- }
+ assert.eq(coll1.find().itcount(), 1);
}
- }
+ },
+ {
+ name: "commitTransaction not retried on transient transaction error",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ // Abort the transaction so the commit receives NoSuchTransaction. Note that the fail
+ // command failpoint isn't used because it returns without implicitly aborting the
+ // transaction.
+ const lsid = session.getSessionId();
+ const txnNumber = NumberLong(session.getTxnNumber_forTesting());
+ assert.commandWorked(testDB.adminCommand(
+ {abortTransaction: 1, lsid, txnNumber, autocommit: false, stmtId: NumberInt(0)}));
+
+ const res = assert.commandFailedWithCode(session.commitTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.eq(["TransientTransactionError"], res.errorLabels);
+
+ assert.eq(coll1.find().itcount(), 0);
+ }
+ },
+ {
+ name: "commitTransaction retried on network error",
+ test: function() {
+ const session = testDB.getSession();
- /**
- * Overrides 'runCommand' to provide a specific pre-set response to the given command. If the
- * command is in the blacklist, it is not overridden. Otherwise, if a command response has been
- * specified, returns that without running the function. If a post-command function is specified
- * for the command, runs that after the command is run. The post-command function is run
- * regardless of whether the command response was overridden or not.
- */
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function(dbName, cmdObj, options) {
- const cmdName = Object.keys(cmdObj)[0];
- if (runCommandOverrideBlacklistedCommands.includes(cmdName)) {
- return mongoRunCommandOriginal.apply(this, arguments);
- }
-
- if (cmdResponseOverrides.hasOwnProperty(cmdName)) {
- const cmdResponse = cmdResponseOverrides[cmdName];
- // Overrides are single-use.
- clearCommandOverride(cmdName);
- assert(cmdResponse);
-
- jsTestLog("Unittest returning: " + tojsononeline(cmdResponse.responseObj) +
- ", running: " + tojsononeline(cmdObj));
- assert(cmdResponse.responseObj);
- assert(cmdResponse.responseObj.ok === 1 || cmdResponse.responseObj.ok === 0);
-
- runPostCommandFunc(cmdName);
- return cmdResponse.responseObj;
- }
-
- const res = mongoRunCommandOriginal.apply(this, arguments);
- print("Unittest received: " + tojsononeline(res) + ", running: " + tojsononeline(cmdObj));
- runPostCommandFunc(cmdName);
- return res;
- };
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ assert.commandWorked(session.commitTransaction_forTesting());
+
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ }
+ },
+ {
+ name: "abortTransaction retried on retryable code",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["abortTransaction"], {errorCode: ErrorCodes.NotMaster});
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ assert.commandWorked(session.abortTransaction_forTesting());
+
+ assert.eq(coll1.find().itcount(), 0);
+ }
+ },
+ {
+ name: "abortTransaction retried on network error",
+ test: function() {
+ const session = testDB.getSession();
- const dbName = "txn_override_unittests";
- const collName1 = "test_coll1";
- const collName2 = "test_coll2";
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["abortTransaction"], {closeConnection: true});
- const rst = new ReplSetTest({nodes: 1});
- rst.startSet();
- rst.initiate();
- const conn = rst.getPrimary();
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
- // We have a separate connection for the failpoint so that it does not break up the transaction
- // buffered in network_error_and_txn_override.js.
- const failpointConn = new Mongo(conn.host);
+ assert.commandWorked(session.abortTransaction_forTesting());
- /**
- * Marks that the given command should fail with the given parameters using the failCommand
- * failpoint. This does not break up a currently active transaction in the override function.
- * This does override previous uses of the failpoint, however.
- */
- function failCommandWithFailPoint(commandsToFail, {
- errorCode: errorCode,
- closeConnection: closeConnection = false,
- writeConcernError: writeConcernError,
- // By default only fail the next request of the given command.
- mode: mode = {times: 1},
- } = {}) {
- // The fail point will ignore the WCE if an error code is specified.
- assert(!(writeConcernError && errorCode),
- "Cannot specify both a WCE " + tojsononeline(writeConcernError) +
- " and an error code " + errorCode);
+ assert.eq(coll1.find().itcount(), 0);
+ }
+ },
+ {
+ name: "abortTransaction retried on write concern error",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["abortTransaction"], {
+ writeConcernError:
+ {code: ErrorCodes.PrimarySteppedDown, codeName: "PrimarySteppedDown"}
+ });
- let data = {
- failCommands: commandsToFail,
- };
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
- if (errorCode) {
- data["errorCode"] = errorCode;
+ // The fail command fail point with a write concern error triggers after the command
+ // is processed, so the retry will find the transaction has already aborted and return
+ // NoSuchTransaction.
+ const res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert(!res.hasOwnProperty("writeConcernError"));
+
+ assert.eq(coll1.find().itcount(), 0);
+ }
+ },
+ {
+ name: "abortTransaction not retried on transient transaction error",
+ test: function() {
+ const session = testDB.getSession();
+
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ session.startTransaction();
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+
+ // Abort the transaction so the commit receives NoSuchTransaction. Note that the fail
+ // command failpoint isn't used because it returns without implicitly aborting the
+ // transaction.
+ const lsid = session.getSessionId();
+ const txnNumber = NumberLong(session.getTxnNumber_forTesting());
+ assert.commandWorked(testDB.adminCommand(
+ {abortTransaction: 1, lsid, txnNumber, autocommit: false, stmtId: NumberInt(0)}));
+
+ const res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
+ ErrorCodes.NoSuchTransaction);
+ assert.eq(["TransientTransactionError"], res.errorLabels);
+
+ assert.eq(coll1.find().itcount(), 0);
}
+ },
+ {
+ name: "raw response w/ one retryable error",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ raw: {
+ shardOne: {code: ErrorCodes.NotMaster, errmsg: "dummy"},
+ shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
+ }
+ });
+
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ // The first attempt should fail, but the retry succeeds.
+ assert.commandWorked(coll1.createIndex({x: 1}));
- if (closeConnection) {
- data["closeConnection"] = closeConnection;
+ // The index should exist.
+ const indexes = coll1.getIndexes();
+ assert.eq(2, indexes.length, tojson(indexes));
+ assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
}
+ },
+ {
+ name: "raw response w/ one retryable error and one success",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ raw: {
+ // Raw responses only omit a top-level code if more than one error was
+ // returned from a shard, so a third shard is needed.
+ shardOne: {code: ErrorCodes.NotMaster, errmsg: "dummy"},
+ shardTwo: {ok: 1},
+ shardThree: {code: ErrorCodes.InternalError, errmsg: "dummy"},
+ }
+ });
+
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ // The first attempt should fail, but the retry succeeds.
+ assert.commandWorked(coll1.createIndex({x: 1}));
+
+ // The index should exist.
+ const indexes = coll1.getIndexes();
+ assert.eq(2, indexes.length, tojson(indexes));
+ assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
+ }
+ },
+ {
+ name: "raw response w/ one network error",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ raw: {
+ shardOne: {code: ErrorCodes.InternalError, errmsg: "dummy"},
+ shardTwo: {code: ErrorCodes.HostUnreachable, errmsg: "dummy"}
+ }
+ });
- if (writeConcernError) {
- data["writeConcernError"] = writeConcernError;
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ // The first attempt should fail, but the retry succeeds.
+ assert.commandWorked(coll1.createIndex({x: 1}));
+
+ // The index should exist.
+ const indexes = coll1.getIndexes();
+ assert.eq(2, indexes.length, tojson(indexes));
+ assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
+ }
+ },
+ {
+ name: "raw response ok:1 w/ retryable write concern error",
+ test: function() {
+ // The first encountered write concern error from a shard is attached as the top-level
+ // write concern error.
+ setCommandMockResponse("createIndexes", {
+ ok: 1,
+ raw: {
+ shardOne: {
+ ok: 1,
+ writeConcernError: {
+ code: ErrorCodes.PrimarySteppedDown,
+ codeName: "PrimarySteppedDown",
+ errmsg: "dummy"
+ }
+ },
+ shardTwo: {ok: 1}
+ },
+ writeConcernError: {
+ code: ErrorCodes.PrimarySteppedDown,
+ codeName: "PrimarySteppedDown",
+ errmsg: "dummy"
+ }
+ });
+
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ // The first attempt should fail, but the retry succeeds.
+ assert.commandWorked(coll1.createIndex({x: 1}));
+
+ // The index should exist.
+ const indexes = coll1.getIndexes();
+ assert.eq(2, indexes.length, tojson(indexes));
+ assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
+ }
+ },
+ {
+ name: "raw response w/ no retryable error",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ raw: {
+ shardOne: {code: ErrorCodes.InvalidOptions, errmsg: "dummy"},
+ shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
+ }
+ });
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandFailed(coll1.createIndex({x: 1}));
+ }
+ },
+ {
+ name: "raw response w/ only acceptable errors",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ code: ErrorCodes.IndexAlreadyExists,
+ raw: {
+ shardOne: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"},
+ shardTwo: {ok: 1},
+ shardThree: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"}
+ }
+ });
+
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.createIndex({x: 1}));
+ }
+ },
+ {
+ name: "raw response w/ acceptable error and non-acceptable, non-retryable error",
+ test: function() {
+ setCommandMockResponse("createIndexes", {
+ ok: 0,
+ raw: {
+ shardOne: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"},
+ shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
+ }
+ });
+
+            // "Acceptable" errors are not overridden inside raw responses.
+ assert.commandWorked(testDB.createCollection(collName1));
+ const res = assert.commandFailed(coll1.createIndex({x: 1}));
+ assert(!res.raw.shardOne.ok, tojson(res));
}
+ },
+ {
+ name: "shardCollection retryable code buried in error message",
+ test: function() {
+ setCommandMockResponse("shardCollection", {
+ ok: 0,
+ code: ErrorCodes.OperationFailed,
+ errmsg: "Sharding collection failed :: caused by InterruptedDueToStepdown",
+ });
+
+ // Mock a successful response for the retry, since sharding isn't enabled on the
+ // underlying replica set.
+ attachPostCmdFunction("shardCollection", function() {
+ setCommandMockResponse("shardCollection", {
+ ok: 1,
+ });
+ });
+
+ assert.commandWorked(
+ testDB.runCommand({shardCollection: "dummy_namespace", key: {_id: 1}}));
+ }
+ },
+ {
+ name: "drop retryable code buried in error message",
+ test: function() {
+ setCommandMockResponse("drop", {
+ ok: 0,
+ code: ErrorCodes.OperationFailed,
+ errmsg: "Dropping collection failed :: caused by ShutdownInProgress",
+ });
- assert.commandWorked(mongoRunCommandOriginal.apply(
- failpointConn,
- ['admin', {configureFailPoint: "failCommand", mode: mode, data: data}, 0]));
- }
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(testDB.runCommand({drop: collName1}));
+ }
+ },
+];
+
+// These tests only retry on TransientTransactionErrors. All other errors are expected to cause
+// the test to fail. Failpoints, overrides, and post-command functions are set by default to
+// only run once, so commands should succeed on retry.
+const txnOverrideTests = [
+ {
+ name: "ordinary CRUD ops",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(testDB.runCommand({insert: collName1, documents: [{_id: 2}]}));
+ assert.eq(coll1.find().itcount(), 2);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 2);
+ }
+ },
+ {
+ name: "getMore in transaction",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll1.insert({_id: 2}));
+ assert.eq(coll1.find().itcount(), 2);
+
+ let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
+ const cursorId = cmdRes.cursor.id;
+ assert.gt(cursorId, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.firstBatch.length, 1);
+
+ cmdRes =
+ assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: collName1}));
+ assert.eq(cmdRes.cursor.id, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.nextBatch.length, 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 2);
+ }
+ },
+ {
+ name: "getMore starts transaction",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll1.insert({_id: 2}));
+ assert.eq(coll1.find().itcount(), 2);
+ assert.eq(coll2.find().itcount(), 0);
+
+ let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
+ const cursorId = cmdRes.cursor.id;
+ assert.gt(cursorId, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.firstBatch.length, 1);
+
+ assert.commandWorked(testDB.createCollection(collName2));
+
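+            // createCollection cannot run inside a transaction, so the override commits
+            // the open one; the getMore would then be the first command of a new
+            // transaction, which getMore cannot start, so it throws.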
+ assert.throws(() => testDB.runCommand({getMore: cursorId, collection: collName1}));
+ }
+ },
+ {
+ name: "getMore in different transaction",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll1.insert({_id: 2}));
+ assert.eq(coll1.find().itcount(), 2);
+ assert.eq(coll2.find().itcount(), 0);
+
+ let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
+ const cursorId = cmdRes.cursor.id;
+ assert.gt(cursorId, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.firstBatch.length, 1);
+
+ assert.commandWorked(coll2.insert({_id: 3}));
+ assert.eq(coll1.find().itcount(), 2);
+ assert.eq(coll2.find().itcount(), 1);
+
+ assert.commandWorked(coll2.insert({_id: 4}));
+
+ assert.commandFailed(testDB.runCommand({getMore: cursorId, collection: collName1}));
+ }
+ },
+ {
+ name: "getMore after TransientTransactionError",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll1.insert({_id: 2}));
+ assert.eq(coll1.find().itcount(), 2);
+ failCommandWithFailPoint(["find"], {errorCode: ErrorCodes.NoSuchTransaction});
+
+ let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
+ const cursorId = cmdRes.cursor.id;
+ assert.gt(cursorId, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.firstBatch.length, 1);
+
+ cmdRes =
+ assert.commandWorked(testDB.runCommand({getMore: cursorId, collection: collName1}));
+ assert.eq(cmdRes.cursor.id, NumberLong(0));
+ assert.eq(cmdRes.cursor.ns, coll1.getFullName());
+ assert.eq(cmdRes.cursor.nextBatch.length, 1);
+ assert.eq(coll1.find().itcount(), 2);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 2);
+ }
+ },
+ {
+ name: "implicit collection creation",
+ test: function() {
+ const res = assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(1, res.nInserted);
+ assert.eq(coll1.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "errors cause transaction to abort",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.commandFailedWithCode(coll1.insert({_id: 1}), ErrorCodes.DuplicateKey);
+
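+            // The DuplicateKey error aborted the transaction, rolling back the earlier
+            // insert as well.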
+ assert.eq(coll1.find().itcount(), 0);
+ }
+ },
+ {
+ name: "update with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with ordinary error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ }
+ },
+ {
+ name: "update with NoSuchTransaction error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NoSuchTransaction});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.throws(() => coll1.update({_id: 1}, {$inc: {x: 1}}));
+ }
+ },
+ {
+ name: "update with two stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"],
+ {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "update with chained stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ // Chain multiple update errors together.
+ attachPostCmdFunction("update", function() {
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "implicit collection creation with stepdown",
+ test: function() {
+ failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.NotMaster});
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError",
+ test: function() {
+ failCommandWithFailPoint(
+ ["create"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal stepdown error",
+ test: function() {
+ failCommandWithErrorAndWCENoRun(
+ "create", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal ordinary error",
+ test: function() {
+ failCommandWithErrorAndWCENoRun("create",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with ordinary error",
+ test: function() {
+ failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.OperationFailed});
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with network error",
+ test: function() {
+ failCommandWithFailPoint(["create"], {closeConnection: true});
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError no success",
+ test: function() {
+ failCommandWithWCENoRun("create", ErrorCodes.NotMaster, "NotMaster");
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "errors cause the override to abort transactions",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
- /**
- * Turns off the failCommand failpoint completely.
- */
- function stopFailingCommands() {
- assert.commandWorked(mongoRunCommandOriginal.apply(
- failpointConn, ['admin', {configureFailPoint: "failCommand", mode: "off"}, 0]));
- }
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.BadValue});
+ assert.commandFailedWithCode(coll1.insert({_id: 2}), ErrorCodes.BadValue);
- /**
- * Run a 'ping' command that is not allowed in a transaction. This has no effect, but causes
- * network_error_and_txn_override.js to commit the current transaction in order to run the
- * 'ping'.
- */
- function endCurrentTransactionIfOpen() {
- print("=-=-=-= Ending current transaction if open");
- assert.commandWorked(testDB.runCommand({ping: 1}));
- }
+ stopFailingCommands();
+ assert.eq(coll1.find().itcount(), 0);
- /**
- * Aborts the current transaction in network_error_and_txn_override.js.
- */
- function abortCurrentTransaction() {
- const session = testDB.getSession();
- const lsid = session.getSessionId();
- const txnNum = TestData.currentTxnOverrideTxnNumber;
- print("=-=-=-= Aborting current transaction " + txnNum + " on " + tojsononeline(lsid));
-
- assert.commandWorked(mongoRunCommandOriginal.apply(
- testDB.getMongo(),
- ['admin', {abortTransaction: 1, autocommit: false, lsid: lsid, txnNumber: txnNum}, 0]));
- }
+ assert.commandWorked(coll1.insert({_id: 3}));
+ assert.eq(coll1.find().itcount(), 1);
- /**
- * Runs a test where a transaction attempts to use a forbidden database name. When running a
- * CRUD operation on one of these databases, network_error_and_txn_override.js is expected to
- * commit the current transaction and run the CRUD operation outside of a transaction.
- */
- function testBadDBName(session, badDBName) {
- const badDB = session.getDatabase(badDBName);
- const badColl = badDB['foo'];
- assert.commandWorked(badDB.createCollection(collName1));
-
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
-
- assert.commandWorked(badColl.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(badColl.find().itcount(), 1);
-
- // We attempt another insert in the 'bad collection' that gets a 'DuplicateKey' error.
- // 'DuplicateKey' errors cause transactions to abort, so if this error were received in a
- // transaction, we would expect the transaction to get aborted and the collections to be
- // empty. Since this is not running in a transaction, even though the statement fails, the
- // previous inserts do not storage-rollback.
- assert.commandFailedWithCode(badColl.insert({_id: 1}), ErrorCodes.DuplicateKey);
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(badColl.find().itcount(), 1);
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(
+ ["commitTransaction"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal stepdown error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.NotMaster,
+ "NotMaster",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal ordinary error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with ordinary error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.OperationFailed});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal NoSuchTransaction error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.NoSuchTransaction,
+ "NoSuchTransaction",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with NoSuchTransaction error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NoSuchTransaction});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError no success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commands in 'admin' database end transaction",
+ test: function() {
+ testBadDBName(session, 'admin');
+ }
+ },
+ {
+ name: "commands in 'config' database end transaction",
+ test: function() {
+ testBadDBName(session, 'config');
+ }
+ },
+ {
+ name: "commands in 'local' database end transaction",
+ test: function() {
+ testBadDBName(session, 'local');
+ }
+ },
+ {
+ name: "getMore on change stream executes outside transaction",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+
+ // Starting a $changeStream aggregation within a transaction would fail, so the
+ // override has to execute this as a standalone command.
+ const changeStream = testDB.collName1.watch();
+ assert.commandWorked(testDB.collName1.insert({_id: 1}));
+ endCurrentTransactionIfOpen();
+
+            // Calling the `next` function on the change stream cursor will trigger a getMore,
+ // which the override must also run as a standalone command.
+ assert.eq(changeStream.next()["fullDocument"], {_id: 1});
+
+ // An aggregation without $changeStream runs within a transaction.
+ let aggCursor = testDB.collName1.aggregate([], {cursor: {batchSize: 0}});
+ assert.eq(aggCursor.next(), {_id: 1});
+
+ // Creating a non-$changeStream aggregation cursor and running its getMore in a
+ // different transaction will fail.
+ aggCursor = testDB.collName1.aggregate([], {cursor: {batchSize: 0}});
+ endCurrentTransactionIfOpen();
+ assert.throws(() => aggCursor.next());
+ }
+ },
+];
+
+// Failpoints, overrides, and post-command functions are set by default to only run once, so
+// commands should succeed on retry.
+const txnOverridePlusRetryOnNetworkErrorTests = [
+ {
+ name: "$where in jstests/core/js4.js",
+ test: function() {
+ const real = {a: 1, b: "abc", c: /abc/i, d: new Date(111911100111), e: null, f: true};
+ assert.commandWorked(coll1.insert(real));
+
+ failCommandWithErrorAndWCENoRun("drop",
+ ErrorCodes.NamespaceNotFound,
+ "NamespaceNotFound",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ coll1.drop();
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+
+ assert.commandWorked(coll1.insert({a: 2, b: {c: 7, d: "d is good"}}));
+ const cursor = coll1.find({
+ $where: function() {
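+                    // Inside $where, 'obj' is bound to the document being evaluated.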
+ assert.eq(3, Object.keySet(obj).length);
+ assert.eq(2, obj.a);
+ assert.eq(7, obj.b.c);
+ assert.eq("d is good", obj.b.d);
+ return true;
+ }
+ });
+ assert.eq(1, cursor.toArray().length);
+ }
+ },
+ {
+ name: "update with network error after success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ attachPostCmdFunction("update", function() {
+ throw new Error("SocketException");
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "retry on NotMaster",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ }
+ },
+ {
+ name: "retry on NotMaster with object change",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ let obj1 = {_id: 1, x: 5};
+ let obj2 = {_id: 2, x: 5};
+ assert.commandWorked(coll1.insert(obj1));
+ assert.commandWorked(coll1.insert(obj2));
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 5}]);
+ obj1.x = 7;
+ assert.commandWorked(coll1.update({_id: 2}, {$set: {x: 8}}));
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
+
+ endCurrentTransactionIfOpen();
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
+ }
+ },
+ {
+ name: "implicit collection creation with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(
+ ["create"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal stepdown error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun(
+ "create", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError and normal ordinary error",
+ test: function() {
+ failCommandWithErrorAndWCENoRun("create",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with ordinary error",
+ test: function() {
+ failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.OperationFailed});
+ assert.throws(() => coll1.insert({_id: 1}));
+ }
+ },
+ {
+ name: "implicit collection creation with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["create"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "implicit collection creation with WriteConcernError no success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithWCENoRun("create", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "update with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with ordinary error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ }
+ },
+ {
+ name: "update with NoSuchTransaction error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NoSuchTransaction});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ }
+ },
+ {
+ name: "update with two stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"],
+ {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "update with chained stepdown errors",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ // Chain multiple update errors together.
+ attachPostCmdFunction("update", function() {
+ failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.eq(coll1.find().toArray(), [{_id: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
+ }
+ },
+ {
+ name: "commit transaction with stepdown",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(
+ ["commitTransaction"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal stepdown error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.NotMaster,
+ "NotMaster",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal ordinary error",
+ test: function() {
+            // The write concern error makes the commit retryable, and the retry does not
+            // return OperationFailed again, so the commit ultimately succeeds.
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal ordinary error twice",
+ test: function() {
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.OperationFailed,
+ "OperationFailed",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ // After commitTransaction fails, fail it again with just the ordinary error.
+ attachPostCmdFunction("commitTransaction", function() {
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.OperationFailed});
+ });
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
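+            // The retry triggered by the write concern error hits the second, plain
+            // OperationFailed, which is not retryable, so the commit ultimately fails.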
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with ordinary error",
+ test: function() {
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.OperationFailed});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ assert.throws(() => endCurrentTransactionIfOpen());
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError and normal NoSuchTransaction error",
+ test: function() {
+ failCommandWithErrorAndWCENoRun("commitTransaction",
+ ErrorCodes.NoSuchTransaction,
+ "NoSuchTransaction",
+ ErrorCodes.NotMaster,
+ "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with NoSuchTransaction error",
+ test: function() {
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NoSuchTransaction});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with network error",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commit transaction with WriteConcernError no success",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1}));
+ assert.commandWorked(coll2.insert({_id: 1}));
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+
+ endCurrentTransactionIfOpen();
+ assert.eq(coll1.find().itcount(), 1);
+ assert.eq(coll2.find().itcount(), 1);
+ }
+ },
+ {
+ name: "commitTransaction fails with SERVER-38856",
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(
+ ["create"],
+ {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
+
+ // After commitTransaction fails, abort the transaction and drop the collection
+ // as if the transaction were being retried on a different node.
+ attachPostCmdFunction("commitTransaction", function() {
+ abortCurrentTransaction();
+ assert.commandWorked(mongoRunCommandOriginal.apply(testDB.getMongo(),
+ [dbName, {drop: collName2}, 0]));
+ });
+ failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
+ assert.commandWorked(coll1.insert({_id: 1, x: 2}));
+ assert.commandWorked(coll2.insert({_id: 2}));
+ assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 4}}));
+
+ endCurrentTransactionIfOpen();
+
+ assert.docEq(coll1.find().toArray(), [{_id: 1, x: 6}]);
+ assert.docEq(coll2.find().toArray(), [{_id: 2}]);
+ }
+ },
+ {
+ name: 'Dates are copied correctly for SERVER-41917',
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NoSuchTransaction});
+
+ let date = new Date();
+ assert.commandWorked(coll1.insert({_id: 3, a: date}));
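+            // SERVER-41917: the override must copy command objects by value, so mutating
+            // 'date' below must not change the document replayed if the transaction
+            // retries.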
+ date.setMilliseconds(date.getMilliseconds() + 2);
+ assert.eq(null, coll1.findOne({_id: 3, a: date}));
+ const origDoc = coll1.findOne({_id: 3});
+ const ret = assert.commandWorked(coll1.update({_id: 3}, {$min: {a: date}}));
+ assert.eq(ret.nModified, 0);
+
+ endCurrentTransactionIfOpen();
+
+ assert.eq(coll1.findOne({_id: 3}).a, origDoc.a);
+ }
+ },
+ {
+ name: 'Timestamps are copied correctly for SERVER-41917',
+ test: function() {
+ assert.commandWorked(testDB.createCollection(collName1));
+ failCommandWithFailPoint(["commitTransaction"],
+ {errorCode: ErrorCodes.NoSuchTransaction});
+
+ let ts = new Timestamp(5, 6);
+ assert.commandWorked(coll1.insert({_id: 3, a: ts}));
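+            // As with Dates above, mutating 'ts' below must not affect the saved insert.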
+ ts.t++;
+
+ assert.eq(null, coll1.findOne({_id: 3, a: ts}));
+ const origDoc = coll1.findOne({_id: 3});
+ const ret = assert.commandWorked(coll1.update({_id: 3}, {$min: {a: ts}}));
+ assert.eq(ret.nModified, 0);
+
+ endCurrentTransactionIfOpen();
+
+ assert.eq(coll1.findOne({_id: 3}).a, origDoc.a);
+ }
}
+];
- /**
- * Runs a specific test case, resetting test state before and after.
- */
- function runTest(testSuite, testCase) {
- // Drop with majority write concern to ensure transactions in subsequent test cases can
- // immediately take locks on either collection.
- coll1.drop({writeConcern: {w: "majority"}});
- coll2.drop({writeConcern: {w: "majority"}});
-
- // Ensure all overrides and failpoints have been turned off before running the test.
- clearAllCommandOverrides();
- stopFailingCommands();
-
- jsTestLog(testSuite + ": Testing " + testCase.name);
- testCase.test();
-
- // End the current transaction if the test did not end it itself.
- endCurrentTransactionIfOpen();
- jsTestLog(testSuite + ": Test " + testCase.name + " complete.");
-
- // Ensure all overrides and failpoints have been turned off after running the test as well.
- clearAllCommandOverrides();
- stopFailingCommands();
- }
+TestData.networkErrorAndTxnOverrideConfig = {};
+TestData.sessionOptions = new SessionOptions();
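+// Cap the override at three retry attempts per command.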
+TestData.overrideRetryAttempts = 3;
+
+let session = conn.startSession(TestData.sessionOptions);
+let testDB = session.getDatabase(dbName);
+
+load("jstests/libs/override_methods/network_error_and_txn_override.js");
+
+jsTestLog("=-=-=-=-=-= Testing with 'retry on network error' by itself. =-=-=-=-=-=");
+TestData.sessionOptions = new SessionOptions({retryWrites: true});
+TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = true;
+TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = false;
+
+session = conn.startSession(TestData.sessionOptions);
+testDB = session.getDatabase(dbName);
+let coll1 = testDB[collName1];
+let coll2 = testDB[collName2];
+
+retryOnNetworkErrorTests.forEach((testCase) => runTest("retryOnNetworkErrorTests", testCase));
+
+jsTestLog("=-=-=-=-=-= Testing with 'txn override' by itself. =-=-=-=-=-=");
+TestData.sessionOptions = new SessionOptions({retryWrites: false});
+TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = false;
+TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = true;
+
+session = conn.startSession(TestData.sessionOptions);
+testDB = session.getDatabase(dbName);
+coll1 = testDB[collName1];
+coll2 = testDB[collName2];
+
+txnOverrideTests.forEach((testCase) => runTest("txnOverrideTests", testCase));
+
+jsTestLog("=-=-=-=-=-= Testing 'both txn override and retry on network error'. =-=-=-=-=-=");
+TestData.sessionOptions = new SessionOptions({retryWrites: true});
+TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = true;
+TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = true;
+
+session = conn.startSession(TestData.sessionOptions);
+testDB = session.getDatabase(dbName);
+coll1 = testDB[collName1];
+coll2 = testDB[collName2];
+
+txnOverridePlusRetryOnNetworkErrorTests.forEach(
+ (testCase) => runTest("txnOverridePlusRetryOnNetworkErrorTests", testCase));
- const retryOnNetworkErrorTests = [
- {
- name: "update with network error after success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- attachPostCmdFunction("update", function() {
- throw new Error("SocketException");
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "ordinary CRUD ops",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(testDB.runCommand({insert: collName1, documents: [{_id: 2}]}));
- assert.eq(coll1.find().itcount(), 2);
- }
- },
- {
- name: "retry on NotMaster",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "retry on NotMaster ordered",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
- assert.commandFailed(
- testDB.runCommand({insert: collName1, documents: [{_id: 2}], ordered: true}));
- }
- },
- {
- name: "retry on NotMaster with object change",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- let obj1 = {_id: 1, x: 5};
- let obj2 = {_id: 2, x: 5};
- assert.commandWorked(coll1.insert(obj1));
- assert.commandWorked(coll1.insert(obj2));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 5}]);
- obj1.x = 7;
- assert.commandWorked(coll1.update({_id: 2}, {$set: {x: 8}}));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
- }
- },
- {
- name: "implicit collection creation with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["insert"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal stepdown error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun(
- "insert", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal ordinary error",
- test: function() {
- failCommandWithErrorAndWCENoRun("insert",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with ordinary error",
- test: function() {
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.OperationFailed});
- assert.commandFailed(coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError no success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("insert", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "update with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with ordinary error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
- }
- },
- {
- name: "update with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with two stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"],
- {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "update with chained stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- // Chain multiple update errors together.
- attachPostCmdFunction("update", function() {
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "commands not run in transactions",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.commandFailedWithCode(coll1.insert({_id: 1}), ErrorCodes.DuplicateKey);
-
- // If this were run in a transaction, the original insert and the duplicate one would
- // both be storage-rolled-back and the count would be 0. We test that the count is 1
- // to prove that the inserts are not in a transaction.
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "transaction commands not retried on retryable code",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
-
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- session.startTransaction();
- assert.commandFailedWithCode(
- testDB.runCommand(
- {update: collName1, updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}]}),
- ErrorCodes.NotMaster);
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- }
- },
- {
- name: "transaction commands not retried on network error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {closeConnection: true});
-
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- session.startTransaction();
- const error = assert.throws(() => {
- return testDB.runCommand(
- {update: collName1, updates: [{q: {_id: 1}, u: {$inc: {x: 1}}}]});
- });
- assert(isNetworkError(error), tojson(error));
- assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
-
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- }
- },
- {
- name: "commitTransaction retried on retryable code",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- }
- },
- {
- name: "commitTransaction retried on write concern error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {
- writeConcernError:
- {code: ErrorCodes.PrimarySteppedDown, codeName: "PrimarySteppedDown"}
- });
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- const res = assert.commandWorked(session.commitTransaction_forTesting());
- assert(!res.hasOwnProperty("writeConcernError"));
-
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "commitTransaction not retried on transient transaction error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- // Abort the transaction so the commit receives NoSuchTransaction. Note that the fail
- // command failpoint isn't used because it returns without implicitly aborting the
- // transaction.
- const lsid = session.getSessionId();
- const txnNumber = NumberLong(session.getTxnNumber_forTesting());
- assert.commandWorked(testDB.adminCommand(
- {abortTransaction: 1, lsid, txnNumber, autocommit: false, stmtId: NumberInt(0)}));
-
- const res = assert.commandFailedWithCode(session.commitTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(["TransientTransactionError"], res.errorLabels);
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "commitTransaction retried on network error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- }
- },
- {
- name: "abortTransaction retried on retryable code",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["abortTransaction"], {errorCode: ErrorCodes.NotMaster});
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- assert.commandWorked(session.abortTransaction_forTesting());
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "abortTransaction retried on network error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["abortTransaction"], {closeConnection: true});
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- assert.commandWorked(session.abortTransaction_forTesting());
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "abortTransaction retried on write concern error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["abortTransaction"], {
- writeConcernError:
- {code: ErrorCodes.PrimarySteppedDown, codeName: "PrimarySteppedDown"}
- });
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
- // The fail command fail point with a write concern error triggers after the command
- // is processed, so the retry will find the transaction has already aborted and return
- // NoSuchTransaction.
- const res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert(!res.hasOwnProperty("writeConcernError"));
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "abortTransaction not retried on transient transaction error",
- test: function() {
- const session = testDB.getSession();
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- session.startTransaction();
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
-
-            // Abort the transaction so the session's abort receives NoSuchTransaction. Note that the fail
- // command failpoint isn't used because it returns without implicitly aborting the
- // transaction.
- const lsid = session.getSessionId();
- const txnNumber = NumberLong(session.getTxnNumber_forTesting());
- assert.commandWorked(testDB.adminCommand(
- {abortTransaction: 1, lsid, txnNumber, autocommit: false, stmtId: NumberInt(0)}));
-
- const res = assert.commandFailedWithCode(session.abortTransaction_forTesting(),
- ErrorCodes.NoSuchTransaction);
- assert.eq(["TransientTransactionError"], res.errorLabels);
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "raw response w/ one retryable error",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- raw: {
- shardOne: {code: ErrorCodes.NotMaster, errmsg: "dummy"},
- shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- // The first attempt should fail, but the retry succeeds.
- assert.commandWorked(coll1.createIndex({x: 1}));
-
- // The index should exist.
- const indexes = coll1.getIndexes();
- assert.eq(2, indexes.length, tojson(indexes));
- assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
- }
- },
- {
- name: "raw response w/ one retryable error and one success",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- raw: {
- // Raw responses only omit a top-level code if more than one error was
- // returned from a shard, so a third shard is needed.
- shardOne: {code: ErrorCodes.NotMaster, errmsg: "dummy"},
- shardTwo: {ok: 1},
- shardThree: {code: ErrorCodes.InternalError, errmsg: "dummy"},
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- // The first attempt should fail, but the retry succeeds.
- assert.commandWorked(coll1.createIndex({x: 1}));
-
- // The index should exist.
- const indexes = coll1.getIndexes();
- assert.eq(2, indexes.length, tojson(indexes));
- assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
- }
- },
- {
- name: "raw response w/ one network error",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- raw: {
- shardOne: {code: ErrorCodes.InternalError, errmsg: "dummy"},
- shardTwo: {code: ErrorCodes.HostUnreachable, errmsg: "dummy"}
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- // The first attempt should fail, but the retry succeeds.
- assert.commandWorked(coll1.createIndex({x: 1}));
-
- // The index should exist.
- const indexes = coll1.getIndexes();
- assert.eq(2, indexes.length, tojson(indexes));
- assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
- }
- },
- {
- name: "raw response ok:1 w/ retryable write concern error",
- test: function() {
- // The first encountered write concern error from a shard is attached as the top-level
- // write concern error.
- setCommandMockResponse("createIndexes", {
- ok: 1,
- raw: {
- shardOne: {
- ok: 1,
- writeConcernError: {
- code: ErrorCodes.PrimarySteppedDown,
- codeName: "PrimarySteppedDown",
- errmsg: "dummy"
- }
- },
- shardTwo: {ok: 1}
- },
- writeConcernError: {
- code: ErrorCodes.PrimarySteppedDown,
- codeName: "PrimarySteppedDown",
- errmsg: "dummy"
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
-
- // The first attempt should fail, but the retry succeeds.
- assert.commandWorked(coll1.createIndex({x: 1}));
-
- // The index should exist.
- const indexes = coll1.getIndexes();
- assert.eq(2, indexes.length, tojson(indexes));
- assert(indexes.some(idx => idx.name === "x_1"), tojson(indexes));
- }
- },
- {
- name: "raw response w/ no retryable error",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- raw: {
- shardOne: {code: ErrorCodes.InvalidOptions, errmsg: "dummy"},
- shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandFailed(coll1.createIndex({x: 1}));
- }
- },
- {
- name: "raw response w/ only acceptable errors",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- code: ErrorCodes.IndexAlreadyExists,
- raw: {
- shardOne: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"},
- shardTwo: {ok: 1},
- shardThree: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"}
- }
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.createIndex({x: 1}));
- }
- },
- {
- name: "raw response w/ acceptable error and non-acceptable, non-retryable error",
- test: function() {
- setCommandMockResponse("createIndexes", {
- ok: 0,
- raw: {
- shardOne: {code: ErrorCodes.IndexAlreadyExists, errmsg: "dummy"},
- shardTwo: {code: ErrorCodes.InternalError, errmsg: "dummy"}
- }
- });
-
- // "Acceptable" errors are not overridden inside raw reponses.
- assert.commandWorked(testDB.createCollection(collName1));
- const res = assert.commandFailed(coll1.createIndex({x: 1}));
- assert(!res.raw.shardOne.ok, tojson(res));
- }
- },
- {
- name: "shardCollection retryable code buried in error message",
- test: function() {
- setCommandMockResponse("shardCollection", {
- ok: 0,
- code: ErrorCodes.OperationFailed,
- errmsg: "Sharding collection failed :: caused by InterruptedDueToStepdown",
- });
-
- // Mock a successful response for the retry, since sharding isn't enabled on the
- // underlying replica set.
- attachPostCmdFunction("shardCollection", function() {
- setCommandMockResponse("shardCollection", {
- ok: 1,
- });
- });
-
- assert.commandWorked(
- testDB.runCommand({shardCollection: "dummy_namespace", key: {_id: 1}}));
- }
- },
- {
- name: "drop retryable code buried in error message",
- test: function() {
- setCommandMockResponse("drop", {
- ok: 0,
- code: ErrorCodes.OperationFailed,
- errmsg: "Dropping collection failed :: caused by ShutdownInProgress",
- });
-
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(testDB.runCommand({drop: collName1}));
- }
- },
- ];
-
- // These tests only retry on TransientTransactionErrors. All other errors are expected to cause
- // the test to fail. Failpoints, overrides, and post-command functions are set by default to
- // only run once, so commands should succeed on retry.
- const txnOverrideTests = [
- {
- name: "ordinary CRUD ops",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(testDB.runCommand({insert: collName1, documents: [{_id: 2}]}));
- assert.eq(coll1.find().itcount(), 2);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 2);
- }
- },
- {
- name: "getMore in transaction",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll1.insert({_id: 2}));
- assert.eq(coll1.find().itcount(), 2);
-
- let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
- const cursorId = cmdRes.cursor.id;
- assert.gt(cursorId, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
-
- cmdRes = assert.commandWorked(
- testDB.runCommand({getMore: cursorId, collection: collName1}));
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 2);
- }
- },
- {
- name: "getMore starts transaction",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll1.insert({_id: 2}));
- assert.eq(coll1.find().itcount(), 2);
- assert.eq(coll2.find().itcount(), 0);
-
- let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
- const cursorId = cmdRes.cursor.id;
- assert.gt(cursorId, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
-
- assert.commandWorked(testDB.createCollection(collName2));
-
- assert.throws(() => testDB.runCommand({getMore: cursorId, collection: collName1}));
- }
- },
- {
- name: "getMore in different transaction",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll1.insert({_id: 2}));
- assert.eq(coll1.find().itcount(), 2);
- assert.eq(coll2.find().itcount(), 0);
-
- let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
- const cursorId = cmdRes.cursor.id;
- assert.gt(cursorId, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
-
- assert.commandWorked(coll2.insert({_id: 3}));
- assert.eq(coll1.find().itcount(), 2);
- assert.eq(coll2.find().itcount(), 1);
-
- assert.commandWorked(coll2.insert({_id: 4}));
-
- assert.commandFailed(testDB.runCommand({getMore: cursorId, collection: collName1}));
- }
- },
- {
- name: "getMore after TransientTransactionError",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll1.insert({_id: 2}));
- assert.eq(coll1.find().itcount(), 2);
- failCommandWithFailPoint(["find"], {errorCode: ErrorCodes.NoSuchTransaction});
-
- let cmdRes = assert.commandWorked(testDB.runCommand({find: collName1, batchSize: 1}));
- const cursorId = cmdRes.cursor.id;
- assert.gt(cursorId, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.firstBatch.length, 1);
-
- cmdRes = assert.commandWorked(
- testDB.runCommand({getMore: cursorId, collection: collName1}));
- assert.eq(cmdRes.cursor.id, NumberLong(0));
- assert.eq(cmdRes.cursor.ns, coll1.getFullName());
- assert.eq(cmdRes.cursor.nextBatch.length, 1);
- assert.eq(coll1.find().itcount(), 2);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 2);
- }
- },
- {
- name: "implicit collection creation",
- test: function() {
- const res = assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(1, res.nInserted);
- assert.eq(coll1.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "errors cause transaction to abort",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.commandFailedWithCode(coll1.insert({_id: 1}), ErrorCodes.DuplicateKey);
-
- assert.eq(coll1.find().itcount(), 0);
- }
- },
- {
- name: "update with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with ordinary error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
- }
- },
- {
- name: "update with NoSuchTransaction error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NoSuchTransaction});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.throws(() => coll1.update({_id: 1}, {$inc: {x: 1}}));
- }
- },
- {
- name: "update with two stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"],
- {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "update with chained stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- // Chain multiple update errors together.
- attachPostCmdFunction("update", function() {
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "implicit collection creation with stepdown",
- test: function() {
- failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.NotMaster});
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with WriteConcernError",
- test: function() {
- failCommandWithFailPoint(
- ["create"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal stepdown error",
- test: function() {
- failCommandWithErrorAndWCENoRun(
- "create", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal ordinary error",
- test: function() {
- failCommandWithErrorAndWCENoRun("create",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with ordinary error",
- test: function() {
- failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.OperationFailed});
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with network error",
- test: function() {
- failCommandWithFailPoint(["create"], {closeConnection: true});
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with WriteConcernError no success",
- test: function() {
- failCommandWithWCENoRun("create", ErrorCodes.NotMaster, "NotMaster");
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "errors cause the override to abort transactions",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
-
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.BadValue});
- assert.commandFailedWithCode(coll1.insert({_id: 2}), ErrorCodes.BadValue);
-
- stopFailingCommands();
- assert.eq(coll1.find().itcount(), 0);
-
- assert.commandWorked(coll1.insert({_id: 3}));
- assert.eq(coll1.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["commitTransaction"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal stepdown error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.NotMaster,
- "NotMaster",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal ordinary error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with ordinary error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.OperationFailed});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal NoSuchTransaction error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.NoSuchTransaction,
- "NoSuchTransaction",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with NoSuchTransaction error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.NoSuchTransaction});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError no success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commands in 'admin' database end transaction",
- test: function() {
- testBadDBName(session, 'admin');
- }
- },
- {
- name: "commands in 'config' database end transaction",
- test: function() {
- testBadDBName(session, 'config');
- }
- },
- {
- name: "commands in 'local' database end transaction",
- test: function() {
- testBadDBName(session, 'local');
- }
- },
- {
- name: "getMore on change stream executes outside transaction",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
-
- // Starting a $changeStream aggregation within a transaction would fail, so the
- // override has to execute this as a standalone command.
- const changeStream = testDB.collName1.watch();
- assert.commandWorked(testDB.collName1.insert({_id: 1}));
- endCurrentTransactionIfOpen();
-
- // Calling the `next` function on the change stream cursor will trigger a getMore,
- // which the override must also run as a standalone command.
- assert.eq(changeStream.next()["fullDocument"], {_id: 1});
-
- // An aggregation without $changeStream runs within a transaction.
- let aggCursor = testDB.collName1.aggregate([], {cursor: {batchSize: 0}});
- assert.eq(aggCursor.next(), {_id: 1});
-
- // Creating a non-$changeStream aggregation cursor and running its getMore in a
- // different transaction will fail.
- aggCursor = testDB.collName1.aggregate([], {cursor: {batchSize: 0}});
- endCurrentTransactionIfOpen();
- assert.throws(() => aggCursor.next());
- }
- },
- ];
-
- // Failpoints, overrides, and post-command functions are set by default to only run once, so
- // commands should succeed on retry.
- const txnOverridePlusRetryOnNetworkErrorTests = [
- {
- name: "$where in jstests/core/js4.js",
- test: function() {
- const real = {a: 1, b: "abc", c: /abc/i, d: new Date(111911100111), e: null, f: true};
- assert.commandWorked(coll1.insert(real));
-
- failCommandWithErrorAndWCENoRun("drop",
- ErrorCodes.NamespaceNotFound,
- "NamespaceNotFound",
- ErrorCodes.NotMaster,
- "NotMaster");
- coll1.drop();
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
-
- assert.commandWorked(coll1.insert({a: 2, b: {c: 7, d: "d is good"}}));
- const cursor = coll1.find({
- $where: function() {
- assert.eq(3, Object.keySet(obj).length);
- assert.eq(2, obj.a);
- assert.eq(7, obj.b.c);
- assert.eq("d is good", obj.b.d);
- return true;
- }
- });
- assert.eq(1, cursor.toArray().length);
- }
- },
- {
- name: "update with network error after success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- attachPostCmdFunction("update", function() {
- throw new Error("SocketException");
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "retry on NotMaster",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["insert"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- }
- },
- {
- name: "retry on NotMaster with object change",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- let obj1 = {_id: 1, x: 5};
- let obj2 = {_id: 2, x: 5};
- assert.commandWorked(coll1.insert(obj1));
- assert.commandWorked(coll1.insert(obj2));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 5}]);
- obj1.x = 7;
- assert.commandWorked(coll1.update({_id: 2}, {$set: {x: 8}}));
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
-
- endCurrentTransactionIfOpen();
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 5}, {_id: 2, x: 8}]);
- }
- },
- {
- name: "implicit collection creation with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["create"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal stepdown error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun(
- "create", ErrorCodes.NotMaster, "NotMaster", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError and normal ordinary error",
- test: function() {
- failCommandWithErrorAndWCENoRun("create",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with ordinary error",
- test: function() {
- failCommandWithFailPoint(["create"], {errorCode: ErrorCodes.OperationFailed});
- assert.throws(() => coll1.insert({_id: 1}));
- }
- },
- {
- name: "implicit collection creation with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["create"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "implicit collection creation with WriteConcernError no success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("create", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "update with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with ordinary error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.OperationFailed});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandFailed(coll1.update({_id: 1}, {$inc: {x: 1}}));
- }
- },
- {
- name: "update with NoSuchTransaction error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NoSuchTransaction});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- }
- },
- {
- name: "update with two stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"],
- {errorCode: ErrorCodes.NotMaster, mode: {times: 2}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "update with chained stepdown errors",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- // Chain multiple update errors together.
- attachPostCmdFunction("update", function() {
- failCommandWithFailPoint(["update"], {errorCode: ErrorCodes.NotMaster});
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.eq(coll1.find().toArray(), [{_id: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1}]);
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {y: 1}}));
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().toArray(), [{_id: 1, x: 1, y: 1}]);
- }
- },
- {
- name: "commit transaction with stepdown",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {errorCode: ErrorCodes.NotMaster});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with WriteConcernError",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["commitTransaction"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal stepdown error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.NotMaster,
- "NotMaster",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal ordinary error",
- test: function() {
- // The override retries on write concern errors, and the retry does not return
- // OperationFailed again.
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal ordinary error twice",
- test: function() {
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.OperationFailed,
- "OperationFailed",
- ErrorCodes.NotMaster,
- "NotMaster");
- // After commitTransaction fails, fail it again with just the ordinary error.
- attachPostCmdFunction("commitTransaction", function() {
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.OperationFailed});
- });
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with ordinary error",
- test: function() {
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.OperationFailed});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- assert.throws(() => endCurrentTransactionIfOpen());
- }
- },
- {
- name: "commit transaction with WriteConcernError and normal NoSuchTransaction error",
- test: function() {
- failCommandWithErrorAndWCENoRun("commitTransaction",
- ErrorCodes.NoSuchTransaction,
- "NoSuchTransaction",
- ErrorCodes.NotMaster,
- "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with NoSuchTransaction error",
- test: function() {
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.NoSuchTransaction});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with network error",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"], {closeConnection: true});
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commit transaction with WriteConcernError no success",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1}));
- assert.commandWorked(coll2.insert({_id: 1}));
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
-
- endCurrentTransactionIfOpen();
- assert.eq(coll1.find().itcount(), 1);
- assert.eq(coll2.find().itcount(), 1);
- }
- },
- {
- name: "commitTransaction fails with SERVER-38856",
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(
- ["create"],
- {writeConcernError: {code: ErrorCodes.NotMaster, codeName: "NotMaster"}});
-
- // After commitTransaction fails, abort the transaction and drop the collection
- // as if the transaction were being retried on a different node.
- attachPostCmdFunction("commitTransaction", function() {
- abortCurrentTransaction();
- assert.commandWorked(mongoRunCommandOriginal.apply(
- testDB.getMongo(), [dbName, {drop: collName2}, 0]));
- });
- failCommandWithWCENoRun("commitTransaction", ErrorCodes.NotMaster, "NotMaster");
- assert.commandWorked(coll1.insert({_id: 1, x: 2}));
- assert.commandWorked(coll2.insert({_id: 2}));
- assert.commandWorked(coll1.update({_id: 1}, {$inc: {x: 4}}));
-
- endCurrentTransactionIfOpen();
-
- assert.docEq(coll1.find().toArray(), [{_id: 1, x: 6}]);
- assert.docEq(coll2.find().toArray(), [{_id: 2}]);
- }
- },
- {
- name: 'Dates are copied correctly for SERVER-41917',
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.NoSuchTransaction});
-
- let date = new Date();
- assert.commandWorked(coll1.insert({_id: 3, a: date}));
- date.setMilliseconds(date.getMilliseconds() + 2);
- assert.eq(null, coll1.findOne({_id: 3, a: date}));
- const origDoc = coll1.findOne({_id: 3});
- const ret = assert.commandWorked(coll1.update({_id: 3}, {$min: {a: date}}));
- assert.eq(ret.nModified, 0);
-
- endCurrentTransactionIfOpen();
-
- assert.eq(coll1.findOne({_id: 3}).a, origDoc.a);
- }
- },
- {
- name: 'Timestamps are copied correctly for SERVER-41917',
- test: function() {
- assert.commandWorked(testDB.createCollection(collName1));
- failCommandWithFailPoint(["commitTransaction"],
- {errorCode: ErrorCodes.NoSuchTransaction});
-
- let ts = new Timestamp(5, 6);
- assert.commandWorked(coll1.insert({_id: 3, a: ts}));
- ts.t++;
-
- assert.eq(null, coll1.findOne({_id: 3, a: ts}));
- const origDoc = coll1.findOne({_id: 3});
- const ret = assert.commandWorked(coll1.update({_id: 3}, {$min: {a: ts}}));
- assert.eq(ret.nModified, 0);
-
- endCurrentTransactionIfOpen();
-
- assert.eq(coll1.findOne({_id: 3}).a, origDoc.a);
- }
- }
- ];
-
- TestData.networkErrorAndTxnOverrideConfig = {};
- TestData.sessionOptions = new SessionOptions();
- TestData.overrideRetryAttempts = 3;
-
- let session = conn.startSession(TestData.sessionOptions);
- let testDB = session.getDatabase(dbName);
-
- load("jstests/libs/override_methods/network_error_and_txn_override.js");
-
- jsTestLog("=-=-=-=-=-= Testing with 'retry on network error' by itself. =-=-=-=-=-=");
- TestData.sessionOptions = new SessionOptions({retryWrites: true});
- TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = true;
- TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = false;
-
- session = conn.startSession(TestData.sessionOptions);
- testDB = session.getDatabase(dbName);
- let coll1 = testDB[collName1];
- let coll2 = testDB[collName2];
-
- retryOnNetworkErrorTests.forEach((testCase) => runTest("retryOnNetworkErrorTests", testCase));
-
- jsTestLog("=-=-=-=-=-= Testing with 'txn override' by itself. =-=-=-=-=-=");
- TestData.sessionOptions = new SessionOptions({retryWrites: false});
- TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = false;
- TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = true;
-
- session = conn.startSession(TestData.sessionOptions);
- testDB = session.getDatabase(dbName);
- coll1 = testDB[collName1];
- coll2 = testDB[collName2];
-
- txnOverrideTests.forEach((testCase) => runTest("txnOverrideTests", testCase));
-
- jsTestLog("=-=-=-=-=-= Testing 'both txn override and retry on network error'. =-=-=-=-=-=");
- TestData.sessionOptions = new SessionOptions({retryWrites: true});
- TestData.networkErrorAndTxnOverrideConfig.retryOnNetworkErrors = true;
- TestData.networkErrorAndTxnOverrideConfig.wrapCRUDinTransactions = true;
-
- session = conn.startSession(TestData.sessionOptions);
- testDB = session.getDatabase(dbName);
- coll1 = testDB[collName1];
- coll2 = testDB[collName2];
-
- txnOverridePlusRetryOnNetworkErrorTests.forEach(
- (testCase) => runTest("txnOverridePlusRetryOnNetworkErrorTests", testCase));
-
- rst.stopSet();
+rst.stopSet();
})();
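
The suites above are driven almost entirely by the failCommand fail point. As a minimal standalone sketch of that mechanism (shell JavaScript; the collection name "demo" is illustrative, not part of the test suite), arming it for a single command looks like this:

    // Arm failCommand to fail the next insert once with a retryable stepdown code.
    assert.commandWorked(db.adminCommand({
        configureFailPoint: "failCommand",
        mode: {times: 1},
        data: {failCommands: ["insert"], errorCode: ErrorCodes.NotMaster}
    }));

    // The first insert reports NotMaster without reaching the storage layer...
    assert.commandFailedWithCode(db.runCommand({insert: "demo", documents: [{_id: 1}]}),
                                 ErrorCodes.NotMaster);

    // ...and the next one succeeds, since the fail point was armed for one hit only.
    assert.commandWorked(db.runCommand({insert: "demo", documents: [{_id: 1}]}));

    // Disarm the fail point.
    assert.commandWorked(db.adminCommand({configureFailPoint: "failCommand", mode: "off"}));
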
diff --git a/jstests/replsets/unconditional_step_down.js b/jstests/replsets/unconditional_step_down.js
index 07902aec3fa..c9f95bcb1ac 100644
--- a/jstests/replsets/unconditional_step_down.js
+++ b/jstests/replsets/unconditional_step_down.js
@@ -4,213 +4,211 @@
* @tags: [requires_document_locking]
*/
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
+
+const testName = "txnsDuringStepDown";
+const dbName = testName;
+const collName = "testcoll";
+const collNss = dbName + '.' + collName;
+
+const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}]});
+rst.startSet();
+rst.initiate();
+
+let primary;
+let secondary;
+let primaryDB;
+
+function refreshConnection() {
+ primary = rst.getPrimary();
+ primaryDB = primary.getDB(dbName);
+ secondary = rst.getSecondary();
+}
+
+refreshConnection();
+
+jsTestLog("Writing data to collection.");
+assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{_id: 'readOp'}]}));
+rst.awaitReplication();
+
+const readFailPoint = "waitInFindBeforeMakingBatch";
+const writeFailPoint = "hangWithLockDuringBatchInsert";
+
+TestData.dbName = dbName;
+TestData.collName = collName;
+TestData.readFailPoint = readFailPoint;
+TestData.skipRetryOnNetworkError = true;
+
+function runStepDownTest({testMsg, stepDownFn, toRemovedState}) {
+ jsTestLog(`Testing step down due to ${testMsg}`);
+
+ // 'toRemovedState' determines whether to tag the connections so that they are not
+ // closed when the primary transitions to the REMOVED state.
+ toRemovedState = toRemovedState || false;
+
+ // Clears the log before running the test.
+ assert.commandWorked(primary.adminCommand({clearLog: 'global'}));
+
+ jsTestLog("Enable fail point for namespace '" + collNss + "'");
+ // Find command.
+ assert.commandWorked(primary.adminCommand({
+ configureFailPoint: readFailPoint,
+ data: {nss: collNss, shouldCheckForInterrupt: true},
+ mode: "alwaysOn"
+ }));
+ // Insert command.
+ assert.commandWorked(primary.adminCommand({
+ configureFailPoint: writeFailPoint,
+ data: {nss: collNss, shouldCheckForInterrupt: true},
+ mode: "alwaysOn"
+ }));
+
+ var startSafeParallelShell = (func, port) => {
+ TestData.func = func;
+ var safeFunc = (toRemovedState) ? () => {
+ assert.commandWorked(db.adminCommand({isMaster: 1, hangUpOnStepDown: false}));
+ TestData.func();
+ } : func;
+ return startParallelShell(safeFunc, port);
+ };
+
+ const joinReadThread = startSafeParallelShell(() => {
+ jsTestLog("Start blocking find cmd before step down");
+ var findRes = assert.commandWorked(
+ db.getSiblingDB(TestData.dbName).runCommand({"find": TestData.collName}));
+ assert.eq(findRes.cursor.firstBatch.length, 1);
+ }, primary.port);
+
+ const joinWriteThread = startSafeParallelShell(() => {
+ jsTestLog("Start blocking insert cmd before step down");
+ assert.commandFailedWithCode(
+ db.getSiblingDB(TestData.dbName)[TestData.collName].insert([{val: 'writeOp1'}]),
+ ErrorCodes.InterruptedDueToReplStateChange);
+ }, primary.port);
+
+ const joinUnblockStepDown = startSafeParallelShell(() => {
+ load("jstests/libs/check_log.js");
+
+ jsTestLog("Wait for step down to start killing operations");
+ checkLog.contains(db, "Starting to kill user operations");
+
+ jsTestLog("Unblock step down");
+ // Turn off fail point on find cmd to allow step down to continue.
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: TestData.readFailPoint, mode: "off"}));
+ }, primary.port);
- load("jstests/libs/curop_helpers.js"); // for waitForCurOpByFailPoint().
+ jsTestLog("Wait for find cmd to reach the fail point");
+ waitForCurOpByFailPoint(primaryDB, collNss, readFailPoint);
- const testName = "txnsDuringStepDown";
- const dbName = testName;
- const collName = "testcoll";
- const collNss = dbName + '.' + collName;
+ jsTestLog("Wait for write cmd to reach the fail point");
+ waitForCurOpByFailPoint(primaryDB, collNss, writeFailPoint);
- const rst = new ReplSetTest({nodes: [{}, {rsConfig: {priority: 0}}, {arbiter: true}]});
- rst.startSet();
- rst.initiate();
+ jsTestLog("Trigger step down");
+ var oldConfig = stepDownFn();
- let primary;
- let secondary;
- let primaryDB;
+ // Waits for all threads to join.
+ joinUnblockStepDown();
+ joinReadThread();
+ joinWriteThread();
- function refreshConnection() {
- primary = rst.getPrimary();
- primaryDB = primary.getDB(dbName);
- secondary = rst.getSecondary();
- }
+ // Wait till the primary has stepped down to SECONDARY (or REMOVED).
+ waitForState(primary,
+ (toRemovedState) ? ReplSetTest.State.REMOVED : ReplSetTest.State.SECONDARY);
+ assert.commandWorked(primary.adminCommand({configureFailPoint: writeFailPoint, mode: "off"}));
+ // Get the new primary.
refreshConnection();
+}
- jsTestLog("Writing data to collection.");
- assert.commandWorked(primaryDB.runCommand({insert: collName, documents: [{_id: 'readOp'}]}));
- rst.awaitReplication();
-
- const readFailPoint = "waitInFindBeforeMakingBatch";
- const writeFailPoint = "hangWithLockDuringBatchInsert";
-
- TestData.dbName = dbName;
- TestData.collName = collName;
- TestData.readFailPoint = readFailPoint;
- TestData.skipRetryOnNetworkError = true;
-
- function runStepDownTest({testMsg, stepDownFn, toRemovedState}) {
- jsTestLog(`Testing step down due to ${testMsg}`);
-
- // 'toRemovedState' determines whether to tag the connections so that they are not
- // closed when the primary transitions to the REMOVED state.
- toRemovedState = toRemovedState || false;
-
- // Clears the log before running the test.
- assert.commandWorked(primary.adminCommand({clearLog: 'global'}));
-
- jsTestLog("Enable fail point for namespace '" + collNss + "'");
- // Find command.
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: readFailPoint,
- data: {nss: collNss, shouldCheckForInterrupt: true},
- mode: "alwaysOn"
- }));
- // Insert command.
- assert.commandWorked(primary.adminCommand({
- configureFailPoint: writeFailPoint,
- data: {nss: collNss, shouldCheckForInterrupt: true},
- mode: "alwaysOn"
- }));
-
- var startSafeParallelShell = (func, port) => {
- TestData.func = func;
- var safeFunc = (toRemovedState) ? () => {
- assert.commandWorked(db.adminCommand({isMaster: 1, hangUpOnStepDown: false}));
- TestData.func();
- } : func;
- return startParallelShell(safeFunc, port);
- };
-
- const joinReadThread = startSafeParallelShell(() => {
- jsTestLog("Start blocking find cmd before step down");
- var findRes = assert.commandWorked(
- db.getSiblingDB(TestData.dbName).runCommand({"find": TestData.collName}));
- assert.eq(findRes.cursor.firstBatch.length, 1);
- }, primary.port);
-
- const joinWriteThread = startSafeParallelShell(() => {
- jsTestLog("Start blocking insert cmd before step down");
- assert.commandFailedWithCode(
- db.getSiblingDB(TestData.dbName)[TestData.collName].insert([{val: 'writeOp1'}]),
- ErrorCodes.InterruptedDueToReplStateChange);
- }, primary.port);
-
- const joinUnblockStepDown = startSafeParallelShell(() => {
- load("jstests/libs/check_log.js");
-
- jsTestLog("Wait for step down to start killing operations");
- checkLog.contains(db, "Starting to kill user operations");
-
- jsTestLog("Unblock step down");
- // Turn off fail point on find cmd to allow step down to continue.
- assert.commandWorked(
- db.adminCommand({configureFailPoint: TestData.readFailPoint, mode: "off"}));
- }, primary.port);
-
- jsTestLog("Wait for find cmd to reach the fail point");
- waitForCurOpByFailPoint(primaryDB, collNss, readFailPoint);
-
- jsTestLog("Wait for write cmd to reach the fail point");
- waitForCurOpByFailPoint(primaryDB, collNss, writeFailPoint);
-
- jsTestLog("Trigger step down");
- var oldConfig = stepDownFn();
-
- // Waits for all threads to join.
- joinUnblockStepDown();
- joinReadThread();
- joinWriteThread();
-
- // Wait till the primary has stepped down to SECONDARY (or REMOVED).
- waitForState(primary,
- (toRemovedState) ? ReplSetTest.State.REMOVED : ReplSetTest.State.SECONDARY);
+function runStepsDowntoRemoved(params) {
+ var oldConfigBeforeTest = rst.getReplSetConfigFromNode();
- assert.commandWorked(
- primary.adminCommand({configureFailPoint: writeFailPoint, mode: "off"}));
- // Get the new primary.
- refreshConnection();
+ // Run the test.
+ params["toRemovedState"] = true;
+ runStepDownTest(params);
+ oldConfigBeforeTest.version = ++(rst.getReplSetConfigFromNode().version);
+
+ // On exit, add the removed node back to replica set.
+ assert.commandWorked(primary.adminCommand({replSetReconfig: oldConfigBeforeTest, force: true}));
+ refreshConnection();
+}
+
+runStepDownTest({
+ testMsg: "reconfig command",
+ stepDownFn: () => {
+ load("./jstests/replsets/rslib.js");
+ var newConfig = rst.getReplSetConfigFromNode();
+
+ var oldMasterId = rst.getNodeId(primary);
+ var newMasterId = rst.getNodeId(secondary);
+
+ newConfig.members[oldMasterId].priority = 0;
+ newConfig.members[newMasterId].priority = 1;
+ newConfig.version++;
+
+ // Run it on primary
+ assert.commandWorked(primary.adminCommand({replSetReconfig: newConfig, force: true}));
}
+});
- function runStepsDowntoRemoved(params) {
- var oldConfigBeforeTest = rst.getReplSetConfigFromNode();
+runStepDownTest({
+ testMsg: "reconfig via heartbeat",
+ stepDownFn: () => {
+ load("./jstests/replsets/rslib.js");
+ var newConfig = rst.getReplSetConfigFromNode();
- // Run the test.
- params["toRemovedState"] = true;
- runStepDownTest(params);
- oldConfigBeforeTest.version = ++(rst.getReplSetConfigFromNode().version);
+ var oldMasterId = rst.getNodeId(primary);
+ var newMasterId = rst.getNodeId(secondary);
- // On exit, add the removed node back to replica set.
- assert.commandWorked(
- primary.adminCommand({replSetReconfig: oldConfigBeforeTest, force: true}));
- refreshConnection();
+ newConfig.members[oldMasterId].priority = 0;
+ newConfig.members[newMasterId].priority = 1;
+ newConfig.version++;
+
+ // Run it on secondary
+ assert.commandWorked(secondary.adminCommand({replSetReconfig: newConfig, force: true}));
+ }
+});
+
+runStepsDowntoRemoved({
+ testMsg: "reconfig via heartbeat - primary to removed",
+ stepDownFn: () => {
+ load("./jstests/replsets/rslib.js");
+ var newConfig = rst.getReplSetConfigFromNode();
+
+ var oldMasterId = rst.getNodeId(primary);
+ var newMasterId = rst.getNodeId(secondary);
+
+ newConfig.members[newMasterId].priority = 1;
+ // Remove the current primary from the config
+ newConfig.members.splice(oldMasterId, 1);
+ newConfig.version++;
+
+ // Run it on secondary
+ assert.commandWorked(secondary.adminCommand({replSetReconfig: newConfig, force: true}));
+ }
+});
+
+runStepDownTest({
+ testMsg: "stepdown via heartbeat",
+ stepDownFn: () => {
+ load("./jstests/replsets/rslib.js");
+ var newConfig = rst.getReplSetConfigFromNode();
+
+ var newMasterId = rst.getNodeId(secondary);
+
+ newConfig.members[newMasterId].priority = 2;
+ newConfig.version++;
+
+ // Run it on primary
+ assert.commandWorked(primary.adminCommand({replSetReconfig: newConfig, force: false}));
+
+ // Now, step up the secondary, which will make the current primary step down.
+ rst.stepUp(secondary);
}
+});
- runStepDownTest({
- testMsg: "reconfig command",
- stepDownFn: () => {
- load("./jstests/replsets/rslib.js");
- var newConfig = rst.getReplSetConfigFromNode();
-
- var oldMasterId = rst.getNodeId(primary);
- var newMasterId = rst.getNodeId(secondary);
-
- newConfig.members[oldMasterId].priority = 0;
- newConfig.members[newMasterId].priority = 1;
- newConfig.version++;
-
- // Run it on primary
- assert.commandWorked(primary.adminCommand({replSetReconfig: newConfig, force: true}));
- }
- });
-
- runStepDownTest({
- testMsg: "reconfig via heartbeat",
- stepDownFn: () => {
- load("./jstests/replsets/rslib.js");
- var newConfig = rst.getReplSetConfigFromNode();
-
- var oldMasterId = rst.getNodeId(primary);
- var newMasterId = rst.getNodeId(secondary);
-
- newConfig.members[oldMasterId].priority = 0;
- newConfig.members[newMasterId].priority = 1;
- newConfig.version++;
-
- // Run it on secondary
- assert.commandWorked(secondary.adminCommand({replSetReconfig: newConfig, force: true}));
- }
- });
-
- runStepsDowntoRemoved({
- testMsg: "reconfig via heartbeat - primary to removed",
- stepDownFn: () => {
- load("./jstests/replsets/rslib.js");
- var newConfig = rst.getReplSetConfigFromNode();
-
- var oldMasterId = rst.getNodeId(primary);
- var newMasterId = rst.getNodeId(secondary);
-
- newConfig.members[newMasterId].priority = 1;
- // Remove the current primary from the config
- newConfig.members.splice(oldMasterId, 1);
- newConfig.version++;
-
- // Run it on secondary
- assert.commandWorked(secondary.adminCommand({replSetReconfig: newConfig, force: true}));
- }
- });
-
- runStepDownTest({
- testMsg: "stepdown via heartbeat",
- stepDownFn: () => {
- load("./jstests/replsets/rslib.js");
- var newConfig = rst.getReplSetConfigFromNode();
-
- var newMasterId = rst.getNodeId(secondary);
-
- newConfig.members[newMasterId].priority = 2;
- newConfig.version++;
-
- // Run it on primary
- assert.commandWorked(primary.adminCommand({replSetReconfig: newConfig, force: false}));
-
- // Now, step up the secondary, which will make the current primary step down.
- rst.stepUp(secondary);
- }
- });
-
- rst.stopSet();
+rst.stopSet();
})();
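
The tests above trigger step-downs indirectly, through replSetReconfig and heartbeats. For contrast, a minimal sketch of the direct route, assuming a running ReplSetTest named rst; on older servers the primary hangs up its connections on step-down, so the command may appear to fail with a network error:

    const primary = rst.getPrimary();
    // Ask the primary to step down for 60 seconds; force: true skips the check
    // that an electable secondary is fully caught up.
    assert.commandWorked(primary.adminCommand({replSetStepDown: 60, force: true}));
    rst.waitForState(primary, ReplSetTest.State.SECONDARY);
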
diff --git a/jstests/replsets/uninitialized_fcv_access.js b/jstests/replsets/uninitialized_fcv_access.js
index f4cdfae7674..dd2ff681e4c 100644
--- a/jstests/replsets/uninitialized_fcv_access.js
+++ b/jstests/replsets/uninitialized_fcv_access.js
@@ -3,30 +3,33 @@
* initialized does not crash the server (see SERVER-34600).
*/
(function() {
- 'use strict';
- load('jstests/libs/feature_compatibility_version.js');
+'use strict';
+load('jstests/libs/feature_compatibility_version.js');
- let rst = new ReplSetTest({nodes: 2});
- rst.startSet();
- let node = rst.nodes[0];
+let rst = new ReplSetTest({nodes: 2});
+rst.startSet();
+let node = rst.nodes[0];
- // The featureCompatibilityVersion parameter is initialized during rst.initiate(), so calling
- // getParameter on the fCV before then will attempt to access an uninitialized fCV.
+// The featureCompatibilityVersion parameter is initialized during rst.initiate(), so calling
+// getParameter on the fCV before then will attempt to access an uninitialized fCV.
- const getParamCmd = {getParameter: 1, featureCompatibilityVersion: 1};
- assert.commandFailedWithCode(node.getDB('admin').runCommand(getParamCmd),
- ErrorCodes.UnknownFeatureCompatibilityVersion,
- 'expected ' + tojson(getParamCmd) +
- ' to fail with code UnknownFeatureCompatibilityVersion');
+const getParamCmd = {
+ getParameter: 1,
+ featureCompatibilityVersion: 1
+};
+assert.commandFailedWithCode(
+ node.getDB('admin').runCommand(getParamCmd),
+ ErrorCodes.UnknownFeatureCompatibilityVersion,
+ 'expected ' + tojson(getParamCmd) + ' to fail with code UnknownFeatureCompatibilityVersion');
- rst.initiate();
+rst.initiate();
- // After the replica set is initialized, getParameter should successfully return the fCV.
+// After the replica set is initialized, getParameter should successfully return the fCV.
- const primary = rst.getPrimary();
- const res = primary.adminCommand(getParamCmd);
- assert.commandWorked(res);
- assert.eq(res.featureCompatibilityVersion.version, latestFCV, tojson(res));
+const primary = rst.getPrimary();
+const res = primary.adminCommand(getParamCmd);
+assert.commandWorked(res);
+assert.eq(res.featureCompatibilityVersion.version, latestFCV, tojson(res));
- rst.stopSet();
+rst.stopSet();
})();
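
Once initiate() has run, the same parameter becomes readable and the FCV can be moved between versions. A small sketch, assuming the latestFCV/lastStableFCV constants loaded from jstests/libs/feature_compatibility_version.js:

    const adminDB = rst.getPrimary().getDB('admin');
    // Read the now-initialized FCV.
    const res = assert.commandWorked(
        adminDB.runCommand({getParameter: 1, featureCompatibilityVersion: 1}));
    assert.eq(res.featureCompatibilityVersion.version, latestFCV);

    // Downgrade and then upgrade again; each transition is itself replicated.
    assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: lastStableFCV}));
    assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV}));
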
diff --git a/jstests/replsets/update_commit_point_from_sync_source_ignores_term.js b/jstests/replsets/update_commit_point_from_sync_source_ignores_term.js
index 7915dfd4b7b..61a4c339fdf 100644
--- a/jstests/replsets/update_commit_point_from_sync_source_ignores_term.js
+++ b/jstests/replsets/update_commit_point_from_sync_source_ignores_term.js
@@ -4,84 +4,83 @@
* @tags: [requires_majority_read_concern]
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
+load("jstests/libs/write_concern_util.js"); // for [stop|restart]ServerReplication.
- const dbName = "test";
- const collName = "coll";
+const dbName = "test";
+const collName = "coll";
- // Set up a ReplSetTest where nodes only sync one oplog entry at a time.
- const rst = new ReplSetTest(
- {nodes: 5, useBridge: true, nodeOptions: {setParameter: "bgSyncOplogFetcherBatchSize=1"}});
- rst.startSet();
- const config = rst.getReplSetConfig();
- // Ban chaining and prevent elections.
- config.settings = {chainingAllowed: false, electionTimeoutMillis: 12 * 60 * 60 * 1000};
- rst.initiate(config);
+// Set up a ReplSetTest where nodes only sync one oplog entry at a time.
+const rst = new ReplSetTest(
+ {nodes: 5, useBridge: true, nodeOptions: {setParameter: "bgSyncOplogFetcherBatchSize=1"}});
+rst.startSet();
+const config = rst.getReplSetConfig();
+// Ban chaining and prevent elections.
+config.settings = {
+ chainingAllowed: false,
+ electionTimeoutMillis: 12 * 60 * 60 * 1000
+};
+rst.initiate(config);
- const nodeA = rst.nodes[0];
- const nodeB = rst.nodes[1];
- const nodeC = rst.nodes[2];
- const nodeD = rst.nodes[3];
- const nodeE = rst.nodes[4];
+const nodeA = rst.nodes[0];
+const nodeB = rst.nodes[1];
+const nodeC = rst.nodes[2];
+const nodeD = rst.nodes[3];
+const nodeE = rst.nodes[4];
- jsTest.log("Node A is primary in term 1. Replicate a write to Node E that is not committed.");
- assert.eq(nodeA, rst.getPrimary());
- // Ensure Node E has a majority committed snapshot.
- assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "dummy"}));
- rst.awaitLastOpCommitted();
- stopServerReplication([nodeB, nodeC, nodeD]);
- assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "term 1, doc 1"}));
- rst.awaitReplication(undefined, undefined, [nodeE]);
- assert.eq(0,
- nodeE.getDB(dbName)[collName]
- .find({_id: "term 1, doc 1"})
- .readConcern("majority")
- .itcount());
+jsTest.log("Node A is primary in term 1. Replicate a write to Node E that is not committed.");
+assert.eq(nodeA, rst.getPrimary());
+// Ensure Node E has a majority committed snapshot.
+assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "dummy"}));
+rst.awaitLastOpCommitted();
+stopServerReplication([nodeB, nodeC, nodeD]);
+assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "term 1, doc 1"}));
+rst.awaitReplication(undefined, undefined, [nodeE]);
+assert.eq(
+ 0,
+ nodeE.getDB(dbName)[collName].find({_id: "term 1, doc 1"}).readConcern("majority").itcount());
- jsTest.log("Disconnect Node E. Perform a new write.");
- nodeE.disconnect([nodeA, nodeB, nodeC, nodeD]);
- restartServerReplication([nodeB, nodeC, nodeD]);
- assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "term 1, doc 2"}));
+jsTest.log("Disconnect Node E. Perform a new write.");
+nodeE.disconnect([nodeA, nodeB, nodeC, nodeD]);
+restartServerReplication([nodeB, nodeC, nodeD]);
+assert.commandWorked(nodeA.getDB(dbName)[collName].insert({_id: "term 1, doc 2"}));
- jsTest.log("Step up Node B in term 2. Commit a new write.");
- // Ensure Node B is caught up, so that it can become primary.
- rst.awaitReplication(undefined, undefined, [nodeB]);
- assert.commandWorked(nodeB.adminCommand({replSetStepUp: 1}));
- rst.waitForState(nodeA, ReplSetTest.State.SECONDARY);
- assert.eq(nodeB, rst.getPrimary());
- assert.commandWorked(
- nodeB.getDB(dbName)[collName].insert({_id: "term 2"}, {writeConcern: {w: "majority"}}));
- // Node E might sync from Node A or Node B. Ensure they both have the new commit point.
- rst.awaitLastOpCommitted(undefined, [nodeA]);
+jsTest.log("Step up Node B in term 2. Commit a new write.");
+// Ensure Node B is caught up, so that it can become primary.
+rst.awaitReplication(undefined, undefined, [nodeB]);
+assert.commandWorked(nodeB.adminCommand({replSetStepUp: 1}));
+rst.waitForState(nodeA, ReplSetTest.State.SECONDARY);
+assert.eq(nodeB, rst.getPrimary());
+assert.commandWorked(
+ nodeB.getDB(dbName)[collName].insert({_id: "term 2"}, {writeConcern: {w: "majority"}}));
+// Node E might sync from Node A or Node B. Ensure they both have the new commit point.
+rst.awaitLastOpCommitted(undefined, [nodeA]);
- jsTest.log("Allow Node E to replicate the last write from term 1.");
- // The stopReplProducerOnDocument failpoint ensures that Node E stops replicating before
- // applying the document {msg: "new primary"}, which is the first document of term 2. This
- // depends on the oplog fetcher batch size being 1.
- assert.commandWorked(nodeE.adminCommand({
- configureFailPoint: "stopReplProducerOnDocument",
- mode: "alwaysOn",
- data: {document: {msg: "new primary"}}
- }));
- nodeE.reconnect([nodeA, nodeB, nodeC, nodeD]);
- assert.soon(() => {
- return nodeE.getDB(dbName)[collName].find({_id: "term 1, doc 2"}).itcount() === 1;
- });
- assert.eq(0, nodeE.getDB(dbName)[collName].find({_id: "term 2"}).itcount());
+jsTest.log("Allow Node E to replicate the last write from term 1.");
+// The stopReplProducerOnDocument failpoint ensures that Node E stops replicating before
+// applying the document {msg: "new primary"}, which is the first document of term 2. This
+// depends on the oplog fetcher batch size being 1.
+assert.commandWorked(nodeE.adminCommand({
+ configureFailPoint: "stopReplProducerOnDocument",
+ mode: "alwaysOn",
+ data: {document: {msg: "new primary"}}
+}));
+nodeE.reconnect([nodeA, nodeB, nodeC, nodeD]);
+assert.soon(() => {
+ return nodeE.getDB(dbName)[collName].find({_id: "term 1, doc 2"}).itcount() === 1;
+});
+assert.eq(0, nodeE.getDB(dbName)[collName].find({_id: "term 2"}).itcount());
- jsTest.log("Node E now knows that its first write is majority committed.");
- // It does not yet know that {_id: "term 1, doc 2"} is committed. Its last batch was {_id: "term
- // 1, doc 2"}. The sync source's lastOpCommitted was in term 2, so Node E updated its
- // lastOpCommitted to its lastApplied, which did not yet include {_id: "term 1, doc 2"}.
- assert.eq(1,
- nodeE.getDB(dbName)[collName]
- .find({_id: "term 1, doc 1"})
- .readConcern("majority")
- .itcount());
+jsTest.log("Node E now knows that its first write is majority committed.");
+// It does not yet know that {_id: "term 1, doc 2"} is committed. Its last batch was {_id: "term
+// 1, doc 2"}. The sync source's lastOpCommitted was in term 2, so Node E updated its
+// lastOpCommitted to its lastApplied, which did not yet include {_id: "term 1, doc 2"}.
+assert.eq(
+ 1,
+ nodeE.getDB(dbName)[collName].find({_id: "term 1, doc 1"}).readConcern("majority").itcount());
- assert.commandWorked(
- nodeE.adminCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));
- rst.stopSet();
+assert.commandWorked(
+ nodeE.adminCommand({configureFailPoint: "stopReplProducerOnDocument", mode: "off"}));
+rst.stopSet();
}());
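
The comments in this hunk encode the rule under test: a syncing node may only adopt a sync
source's commit point outright when it is in the node's own last-applied term. A rough
illustrative sketch of that rule, with invented names (`advanceCommitPoint`, the `node` shape)
that are not the server's actual implementation:

    function advanceCommitPoint(node, sourceCommitPoint) {
        if (sourceCommitPoint.t === node.lastApplied.t) {
            // Same term: the advertised commit point can be adopted directly.
            node.lastOpCommitted = sourceCommitPoint;
        } else {
            // Different (e.g. newer) term: advance no further than what this node
            // has itself applied, which is the behavior Node E exhibits above.
            node.lastOpCommitted = node.lastApplied;
        }
    }
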
diff --git a/jstests/replsets/user_management_wc.js b/jstests/replsets/user_management_wc.js
index c3467d4f80c..f4e755b6f7e 100644
--- a/jstests/replsets/user_management_wc.js
+++ b/jstests/replsets/user_management_wc.js
@@ -9,135 +9,135 @@ load('jstests/multiVersion/libs/auth_helpers.js');
*/
(function() {
- "use strict";
-
- // TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
- TestData.disableImplicitSessions = true;
-
- var replTest = new ReplSetTest(
- {name: 'UserManagementWCSet', nodes: 3, settings: {chainingAllowed: false}});
- replTest.startSet();
- replTest.initiate();
-
- var master = replTest.getPrimary();
- var dbName = "user-management-wc-test";
- var db = master.getDB(dbName);
- var adminDB = master.getDB('admin');
-
- function dropUsersAndRoles() {
- db.dropUser('username');
- db.dropUser('user1');
- db.dropUser('user2');
- }
-
- var commands = [];
-
- commands.push({
- req: {createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles},
- setupFunc: function() {},
- confirmFunc: function() {
- assert(db.auth("username", "password"), "auth failed");
- assert(!db.auth("username", "passworda"), "auth should have failed");
- },
- admin: false
- });
-
- commands.push({
- req: {updateUser: 'username', pwd: 'password2', roles: jsTest.basicUserRoles},
- setupFunc: function() {
- db.runCommand({createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles});
- },
- confirmFunc: function() {
- assert(db.auth("username", "password2"), "auth failed");
- assert(!db.auth("username", "password"), "auth should have failed");
- },
- admin: false
- });
-
- commands.push({
- req: {dropUser: 'tempUser'},
- setupFunc: function() {
- db.runCommand({createUser: 'tempUser', pwd: 'password', roles: jsTest.basicUserRoles});
- assert(db.auth("tempUser", "password"), "auth failed");
- },
- confirmFunc: function() {
- assert(!db.auth("tempUser", "password"), "auth should have failed");
- },
- admin: false
- });
-
- commands.push({
- req: {
- _mergeAuthzCollections: 1,
- tempUsersCollection: 'admin.tempusers',
- tempRolesCollection: 'admin.temproles',
- db: "",
- drop: false
- },
- setupFunc: function() {
- adminDB.system.users.remove({});
- adminDB.system.roles.remove({});
- adminDB.createUser({user: 'lorax', pwd: 'pwd', roles: ['read']});
- adminDB.createRole({role: 'role1', roles: ['read'], privileges: []});
- adminDB.system.users.find().forEach(function(doc) {
- adminDB.tempusers.insert(doc);
- });
- adminDB.system.roles.find().forEach(function(doc) {
- adminDB.temproles.insert(doc);
- });
- adminDB.system.users.remove({});
- adminDB.system.roles.remove({});
-
- assert.eq(0, adminDB.system.users.find().itcount());
- assert.eq(0, adminDB.system.roles.find().itcount());
-
- db.createUser({user: 'lorax2', pwd: 'pwd', roles: ['readWrite']});
- db.createRole({role: 'role2', roles: ['readWrite'], privileges: []});
-
- assert.eq(1, adminDB.system.users.find().itcount());
- assert.eq(1, adminDB.system.roles.find().itcount());
- },
- confirmFunc: function() {
- assert.eq(2, adminDB.system.users.find().itcount());
- assert.eq(2, adminDB.system.roles.find().itcount());
- },
- admin: true
- });
-
- function assertUserManagementWriteConcernError(res) {
- assert.commandFailed(res);
- assert.commandWorkedIgnoringWriteConcernErrors(res);
- assertWriteConcernError(res);
- }
-
- function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 'majority', wtimeout: 25000};
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropUsersAndRoles();
- cmd.setupFunc();
- var res = runCommandCheckAdmin(db, cmd);
- assert.commandWorked(res);
- assert(!res.writeConcernError,
- 'command on a full replicaset had writeConcernError: ' + tojson(res));
- cmd.confirmFunc();
- }
-
- function testInvalidWriteConcern(cmd) {
- cmd.req.writeConcern = {w: 15};
- jsTest.log("Testing " + tojson(cmd.req));
-
- dropUsersAndRoles();
- cmd.setupFunc();
- var res = runCommandCheckAdmin(db, cmd);
- assertUserManagementWriteConcernError(res);
- cmd.confirmFunc();
- }
-
- commands.forEach(function(cmd) {
- testValidWriteConcern(cmd);
- testInvalidWriteConcern(cmd);
- });
-
- replTest.stopSet();
+"use strict";
+
+// TODO SERVER-35447: Multiple users cannot be authenticated on one connection within a session.
+TestData.disableImplicitSessions = true;
+
+var replTest =
+ new ReplSetTest({name: 'UserManagementWCSet', nodes: 3, settings: {chainingAllowed: false}});
+replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+var dbName = "user-management-wc-test";
+var db = master.getDB(dbName);
+var adminDB = master.getDB('admin');
+
+function dropUsersAndRoles() {
+ db.dropUser('username');
+ db.dropUser('user1');
+ db.dropUser('user2');
+}
+
+var commands = [];
+
+commands.push({
+ req: {createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles},
+ setupFunc: function() {},
+ confirmFunc: function() {
+ assert(db.auth("username", "password"), "auth failed");
+ assert(!db.auth("username", "passworda"), "auth should have failed");
+ },
+ admin: false
+});
+
+commands.push({
+ req: {updateUser: 'username', pwd: 'password2', roles: jsTest.basicUserRoles},
+ setupFunc: function() {
+ db.runCommand({createUser: 'username', pwd: 'password', roles: jsTest.basicUserRoles});
+ },
+ confirmFunc: function() {
+ assert(db.auth("username", "password2"), "auth failed");
+ assert(!db.auth("username", "password"), "auth should have failed");
+ },
+ admin: false
+});
+
+commands.push({
+ req: {dropUser: 'tempUser'},
+ setupFunc: function() {
+ db.runCommand({createUser: 'tempUser', pwd: 'password', roles: jsTest.basicUserRoles});
+ assert(db.auth("tempUser", "password"), "auth failed");
+ },
+ confirmFunc: function() {
+ assert(!db.auth("tempUser", "password"), "auth should have failed");
+ },
+ admin: false
+});
+
+commands.push({
+ req: {
+ _mergeAuthzCollections: 1,
+ tempUsersCollection: 'admin.tempusers',
+ tempRolesCollection: 'admin.temproles',
+ db: "",
+ drop: false
+ },
+ setupFunc: function() {
+ adminDB.system.users.remove({});
+ adminDB.system.roles.remove({});
+ adminDB.createUser({user: 'lorax', pwd: 'pwd', roles: ['read']});
+ adminDB.createRole({role: 'role1', roles: ['read'], privileges: []});
+ adminDB.system.users.find().forEach(function(doc) {
+ adminDB.tempusers.insert(doc);
+ });
+ adminDB.system.roles.find().forEach(function(doc) {
+ adminDB.temproles.insert(doc);
+ });
+ adminDB.system.users.remove({});
+ adminDB.system.roles.remove({});
+
+ assert.eq(0, adminDB.system.users.find().itcount());
+ assert.eq(0, adminDB.system.roles.find().itcount());
+
+ db.createUser({user: 'lorax2', pwd: 'pwd', roles: ['readWrite']});
+ db.createRole({role: 'role2', roles: ['readWrite'], privileges: []});
+
+ assert.eq(1, adminDB.system.users.find().itcount());
+ assert.eq(1, adminDB.system.roles.find().itcount());
+ },
+ confirmFunc: function() {
+ assert.eq(2, adminDB.system.users.find().itcount());
+ assert.eq(2, adminDB.system.roles.find().itcount());
+ },
+ admin: true
+});
+
+function assertUserManagementWriteConcernError(res) {
+ assert.commandFailed(res);
+ assert.commandWorkedIgnoringWriteConcernErrors(res);
+ assertWriteConcernError(res);
+}
+
+function testValidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 'majority', wtimeout: 25000};
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ dropUsersAndRoles();
+ cmd.setupFunc();
+ var res = runCommandCheckAdmin(db, cmd);
+ assert.commandWorked(res);
+    assert(!res.writeConcernError,
+           'command on a full replica set had writeConcernError: ' + tojson(res));
+ cmd.confirmFunc();
+}
+
+function testInvalidWriteConcern(cmd) {
+ cmd.req.writeConcern = {w: 15};
+ jsTest.log("Testing " + tojson(cmd.req));
+
+ dropUsersAndRoles();
+ cmd.setupFunc();
+ var res = runCommandCheckAdmin(db, cmd);
+ assertUserManagementWriteConcernError(res);
+ cmd.confirmFunc();
+}
+
+commands.forEach(function(cmd) {
+ testValidWriteConcern(cmd);
+ testInvalidWriteConcern(cmd);
+});
+
+replTest.stopSet();
})();
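
Each entry in the `commands` array above follows the same req/setupFunc/confirmFunc/admin
shape, so extending the test is mechanical. A hedged sketch of what one more case might look
like (the choice of grantRolesToUser and the exact assertions are illustrative, not part of
the test):

    commands.push({
        req: {grantRolesToUser: 'username', roles: jsTest.basicUserRoles},
        setupFunc: function() {
            db.runCommand({createUser: 'username', pwd: 'password', roles: []});
        },
        confirmFunc: function() {
            // usersInfo reports the roles now attached to the user.
            var info = assert.commandWorked(db.runCommand({usersInfo: 'username'}));
            assert.eq(jsTest.basicUserRoles.length, info.users[0].roles.length);
        },
        admin: false
    });
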
diff --git a/jstests/replsets/verify_sessions_expiration_rs.js b/jstests/replsets/verify_sessions_expiration_rs.js
index fbb5465e8f4..70c02f205c9 100644
--- a/jstests/replsets/verify_sessions_expiration_rs.js
+++ b/jstests/replsets/verify_sessions_expiration_rs.js
@@ -14,123 +14,127 @@
// replace it in the config.system.sessions collection.
(function() {
- "use strict";
-
- // This test makes assertions about the number of logical session records.
- TestData.disableImplicitSessions = true;
-
- load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
-
- const refresh = {refreshLogicalSessionCacheNow: 1};
- const startSession = {startSession: 1};
- const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
-
- function refreshSessionsAndVerifyCount(config, expectedCount) {
- config.runCommand(refresh);
- assert.eq(config.system.sessions.count(), expectedCount);
+"use strict";
+
+// This test makes assertions about the number of logical session records.
+TestData.disableImplicitSessions = true;
+
+load("jstests/libs/pin_getmore_cursor.js"); // For "withPinnedCursor".
+
+const refresh = {
+ refreshLogicalSessionCacheNow: 1
+};
+const startSession = {
+ startSession: 1
+};
+const failPointName = "waitAfterPinningCursorBeforeGetMoreBatch";
+
+function refreshSessionsAndVerifyCount(config, expectedCount) {
+ config.runCommand(refresh);
+ assert.eq(config.system.sessions.count(), expectedCount);
+}
+
+function getSessions(config) {
+ return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
+}
+
+const dbName = "test";
+const testCollName = "verify_sessions_find_get_more";
+
+let replTest = new ReplSetTest({name: 'refresh', nodes: 2});
+replTest.startSet();
+replTest.initiate();
+
+const primary = replTest.getPrimary();
+replTest.awaitSecondaryNodes();
+
+let db = primary.getDB(dbName);
+let config = primary.getDB("config");
+
+// 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
+for (let i = 0; i < 5; i++) {
+ let res = db.runCommand(startSession);
+ assert.commandWorked(res, "unable to start session");
+}
+refreshSessionsAndVerifyCount(config, 5);
+
+// Manually delete entries in config.system.sessions to simulate TTL expiration.
+assert.commandWorked(config.system.sessions.remove({}));
+refreshSessionsAndVerifyCount(config, 0);
+
+// 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
+// config.system.sessions collection.
+for (let i = 0; i < 10; i++) {
+ db[testCollName].insert({_id: i, a: i, b: 1});
+}
+
+let cursors = [];
+for (let i = 0; i < 5; i++) {
+ let session = db.getMongo().startSession({});
+ assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
+ "initialize the session");
+ cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
+ assert(cursors[i].hasNext());
+}
+
+refreshSessionsAndVerifyCount(config, 5);
+
+let sessionsCollectionArray;
+let lastUseValues = [];
+for (let i = 0; i < 3; i++) {
+ for (let j = 0; j < cursors.length; j++) {
+ cursors[j].next();
}
- function getSessions(config) {
- return config.system.sessions.aggregate([{'$listSessions': {allUsers: true}}]).toArray();
- }
-
- const dbName = "test";
- const testCollName = "verify_sessions_find_get_more";
-
- let replTest = new ReplSetTest({name: 'refresh', nodes: 2});
- replTest.startSet();
- replTest.initiate();
-
- const primary = replTest.getPrimary();
- replTest.awaitSecondaryNodes();
-
- let db = primary.getDB(dbName);
- let config = primary.getDB("config");
-
- // 1. Verify that sessions expire from config.system.sessions after the timeout has passed.
- for (let i = 0; i < 5; i++) {
- let res = db.runCommand(startSession);
- assert.commandWorked(res, "unable to start session");
- }
refreshSessionsAndVerifyCount(config, 5);
- // Manually delete entries in config.system.sessions to simulate TTL expiration.
- assert.commandWorked(config.system.sessions.remove({}));
- refreshSessionsAndVerifyCount(config, 0);
-
- // 2. Verify that getMores after finds will update the 'lastUse' field on documents in the
- // config.system.sessions collection.
- for (let i = 0; i < 10; i++) {
- db[testCollName].insert({_id: i, a: i, b: 1});
- }
+ sessionsCollectionArray = getSessions(config);
- let cursors = [];
- for (let i = 0; i < 5; i++) {
- let session = db.getMongo().startSession({});
- assert.commandWorked(session.getDatabase("admin").runCommand({usersInfo: 1}),
- "initialize the session");
- cursors.push(session.getDatabase(dbName)[testCollName].find({b: 1}).batchSize(1));
- assert(cursors[i].hasNext());
- }
-
- refreshSessionsAndVerifyCount(config, 5);
-
- let sessionsCollectionArray;
- let lastUseValues = [];
- for (let i = 0; i < 3; i++) {
- for (let j = 0; j < cursors.length; j++) {
- cursors[j].next();
+ if (i == 0) {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ lastUseValues.push(sessionsCollectionArray[j].lastUse);
}
-
- refreshSessionsAndVerifyCount(config, 5);
-
- sessionsCollectionArray = getSessions(config);
-
- if (i == 0) {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- lastUseValues.push(sessionsCollectionArray[j].lastUse);
- }
- } else {
- for (let j = 0; j < sessionsCollectionArray.length; j++) {
- assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
- lastUseValues[j] = sessionsCollectionArray[j].lastUse;
- }
+ } else {
+ for (let j = 0; j < sessionsCollectionArray.length; j++) {
+ assert.gt(sessionsCollectionArray[j].lastUse, lastUseValues[j]);
+ lastUseValues[j] = sessionsCollectionArray[j].lastUse;
}
}
-
- // 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
- // cursors.
- assert.commandWorked(config.system.sessions.remove({}));
- refreshSessionsAndVerifyCount(config, 0);
-
- for (let i = 0; i < cursors.length; i++) {
- assert.commandFailedWithCode(
- db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
- ErrorCodes.CursorNotFound,
- 'expected getMore to fail because the cursor was killed');
- }
-
- // 4. Verify that an expired session (simulated by manual deletion) that has a currently running
- // operation will be vivified during the logical session cache refresh.
- let pinnedCursorSession = db.getMongo().startSession();
- let pinnedCursorDB = pinnedCursorSession.getDatabase(dbName);
-
- withPinnedCursor({
- conn: primary,
- db: pinnedCursorDB,
- assertFunction: (cursorId, coll) => {
- assert.commandWorked(config.system.sessions.remove({}));
- refreshSessionsAndVerifyCount(config, 1);
-
- let db = coll.getDB();
- assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
- },
- runGetMoreFunc: () => {
- db.runCommand({getMore: cursorId, collection: collName});
- },
- failPointName: failPointName
+}
+
+// 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
+// cursors.
+assert.commandWorked(config.system.sessions.remove({}));
+refreshSessionsAndVerifyCount(config, 0);
+
+for (let i = 0; i < cursors.length; i++) {
+ assert.commandFailedWithCode(
+ db.runCommand({getMore: cursors[i]._cursor._cursorid, collection: testCollName}),
+ ErrorCodes.CursorNotFound,
+ 'expected getMore to fail because the cursor was killed');
+}
+
+// 4. Verify that an expired session (simulated by manual deletion) that has a currently running
+// operation will be vivified during the logical session cache refresh.
+let pinnedCursorSession = db.getMongo().startSession();
+let pinnedCursorDB = pinnedCursorSession.getDatabase(dbName);
+
+withPinnedCursor({
+ conn: primary,
+ db: pinnedCursorDB,
+ assertFunction: (cursorId, coll) => {
+ assert.commandWorked(config.system.sessions.remove({}));
+ refreshSessionsAndVerifyCount(config, 1);
+
+ let db = coll.getDB();
+ assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [cursorId]}));
+ },
+ runGetMoreFunc: () => {
+ db.runCommand({getMore: cursorId, collection: collName});
},
- /* assertEndCounts */ false);
+ failPointName: failPointName
+},
+ /* assertEndCounts */ false);
- replTest.stopSet();
+replTest.stopSet();
})();
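
The `lastUse` comparisons in this test read session documents through $listSessions. A small
sketch of inspecting those timestamps by hand, assuming a connection to the primary's "config"
database as in the test:

    const sessions = config.system.sessions
                         .aggregate([{$listSessions: {allUsers: true}}])
                         .toArray();
    sessions.forEach(function(s) {
        print(tojson(s._id.id) + " lastUse: " + s.lastUse);
    });
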
diff --git a/jstests/replsets/view_catalog_oplog_entries.js b/jstests/replsets/view_catalog_oplog_entries.js
index d245a84b897..a39a3f521de 100644
--- a/jstests/replsets/view_catalog_oplog_entries.js
+++ b/jstests/replsets/view_catalog_oplog_entries.js
@@ -4,45 +4,45 @@
*/
(function() {
- "use strict";
+"use strict";
- const dbName = "view_catalog_oplog_entries";
- const collName = "test_coll";
- const viewName = "test_view";
+const dbName = "view_catalog_oplog_entries";
+const collName = "test_coll";
+const viewName = "test_view";
- const replTest = new ReplSetTest({nodes: 1});
- replTest.startSet();
- replTest.initiate();
+const replTest = new ReplSetTest({nodes: 1});
+replTest.startSet();
+replTest.initiate();
- const primary = replTest.getPrimary();
+const primary = replTest.getPrimary();
- assert.commandWorked(primary.getDB(dbName)[collName].insert({a: 1}));
+assert.commandWorked(primary.getDB(dbName)[collName].insert({a: 1}));
- // Create the view.
- assert.commandWorked(primary.getDB(dbName).createView(viewName, collName, []));
+// Create the view.
+assert.commandWorked(primary.getDB(dbName).createView(viewName, collName, []));
- // Modify the view with the "collMod" command.
- assert.commandWorked(primary.getDB(dbName).runCommand(
- {collMod: viewName, viewOn: collName, pipeline: [{$project: {a: 1}}]}));
+// Modify the view with the "collMod" command.
+assert.commandWorked(primary.getDB(dbName).runCommand(
+ {collMod: viewName, viewOn: collName, pipeline: [{$project: {a: 1}}]}));
- // There should be exactly one insert into "system.views" for the view creation...
- const oplog = primary.getDB("local").oplog.rs;
- const createViewOplogEntry = oplog.find({op: "i", ns: (dbName + ".system.views")}).toArray();
- assert.eq(createViewOplogEntry.length, 1);
- assert(createViewOplogEntry[0].hasOwnProperty("ui"),
- "Oplog entry for view creation missing UUID for view catalog: " +
- tojson(createViewOplogEntry[0]));
- const viewCatalogUUID = createViewOplogEntry[0].ui;
+// There should be exactly one insert into "system.views" for the view creation...
+const oplog = primary.getDB("local").oplog.rs;
+const createViewOplogEntry = oplog.find({op: "i", ns: (dbName + ".system.views")}).toArray();
+assert.eq(createViewOplogEntry.length, 1);
+assert(createViewOplogEntry[0].hasOwnProperty("ui"),
+ "Oplog entry for view creation missing UUID for view catalog: " +
+ tojson(createViewOplogEntry[0]));
+const viewCatalogUUID = createViewOplogEntry[0].ui;
- // ...and exactly one update on "system.views" for the view collMod.
- const modViewOplogEntry = oplog.find({op: "u", ns: (dbName + ".system.views")}).toArray();
- assert.eq(modViewOplogEntry.length, 1);
- assert(modViewOplogEntry[0].hasOwnProperty("ui"),
- "Oplog entry for view modification missing UUID for view catalog: " +
- tojson(modViewOplogEntry[0]));
+// ...and exactly one update on "system.views" for the view collMod.
+const modViewOplogEntry = oplog.find({op: "u", ns: (dbName + ".system.views")}).toArray();
+assert.eq(modViewOplogEntry.length, 1);
+assert(modViewOplogEntry[0].hasOwnProperty("ui"),
+ "Oplog entry for view modification missing UUID for view catalog: " +
+ tojson(modViewOplogEntry[0]));
- // Both entries should have the same UUID.
- assert.eq(viewCatalogUUID, modViewOplogEntry[0].ui);
+// Both entries should have the same UUID.
+assert.eq(viewCatalogUUID, modViewOplogEntry[0].ui);
- replTest.stopSet();
+replTest.stopSet();
}());
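
The assertions above all reduce to one query pattern against the oplog: match the op type and
the database's system.views namespace. A standalone sketch of that pattern ("mydb" is a
placeholder database name):

    const oplog = db.getSiblingDB("local").oplog.rs;
    const viewWrites = oplog.find({op: {$in: ["i", "u"]}, ns: "mydb.system.views"}).toArray();
    viewWrites.forEach(function(entry) {
        // Every entry should carry the view catalog's collection UUID in "ui".
        printjson({op: entry.op, ui: entry.ui});
    });
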
diff --git a/jstests/replsets/write_concern_after_stepdown.js b/jstests/replsets/write_concern_after_stepdown.js
index 250acb9e93d..b54e62e8965 100644
--- a/jstests/replsets/write_concern_after_stepdown.js
+++ b/jstests/replsets/write_concern_after_stepdown.js
@@ -3,100 +3,100 @@
* primary to incorrectly acknowledge a w:majority write that's about to be rolled back.
*/
(function() {
- 'use strict';
-
- load("jstests/replsets/rslib.js");
- load("jstests/libs/write_concern_util.js");
-
- var name = "writeConcernStepDownAndBackUp";
- var dbName = "wMajorityCheck";
- var collName = "stepdownAndBackUp";
-
- var rst = new ReplSetTest({
- name: name,
- nodes: [
- {},
- {},
- {rsConfig: {priority: 0}},
- ],
- useBridge: true
+'use strict';
+
+load("jstests/replsets/rslib.js");
+load("jstests/libs/write_concern_util.js");
+
+var name = "writeConcernStepDownAndBackUp";
+var dbName = "wMajorityCheck";
+var collName = "stepdownAndBackUp";
+
+var rst = new ReplSetTest({
+ name: name,
+ nodes: [
+ {},
+ {},
+ {rsConfig: {priority: 0}},
+ ],
+ useBridge: true
+});
+var nodes = rst.startSet();
+rst.initiate();
+
+function waitForPrimary(node) {
+ assert.soon(function() {
+ return node.adminCommand('ismaster').ismaster;
});
- var nodes = rst.startSet();
- rst.initiate();
-
- function waitForPrimary(node) {
- assert.soon(function() {
- return node.adminCommand('ismaster').ismaster;
- });
- }
-
- // SERVER-20844 ReplSetTest starts up a single node replica set then reconfigures to the correct
- // size for faster startup, so nodes[0] is always the first primary.
- jsTestLog("Make sure node 0 is primary.");
- var primary = rst.getPrimary();
- var secondaries = rst.getSecondaries();
- assert.eq(nodes[0], primary);
- // Wait for all data bearing nodes to get up to date.
- assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
- {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
-
- // Stop the secondaries from replicating.
- stopServerReplication(secondaries);
- // Stop the primary from being able to complete stepping down.
- assert.commandWorked(
- nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'}));
-
- jsTestLog("Do w:majority write that will block waiting for replication.");
- var doMajorityWrite = function() {
- // Run ismaster command with 'hangUpOnStepDown' set to false to mark this connection as
- // one that shouldn't be closed when the node steps down. This makes it easier to detect
- // the error returned by the write concern failure.
- assert.commandWorked(db.adminCommand({ismaster: 1, hangUpOnStepDown: false}));
-
- var res = db.getSiblingDB('wMajorityCheck').stepdownAndBackUp.insert({a: 2}, {
- writeConcern: {w: 'majority', wtimeout: 600000}
- });
- assert.writeErrorWithCode(
- res, [ErrorCodes.PrimarySteppedDown, ErrorCodes.InterruptedDueToReplStateChange]);
- };
-
- var joinMajorityWriter = startParallelShell(doMajorityWrite, nodes[0].port);
-
- jsTest.log("Disconnect primary from all secondaries");
- nodes[0].disconnect(nodes[1]);
- nodes[0].disconnect(nodes[2]);
-
- jsTest.log("Wait for a new primary to be elected");
- // Allow the secondaries to replicate again.
- restartServerReplication(secondaries);
-
- waitForPrimary(nodes[1]);
-
- jsTest.log("Do a write to the new primary");
- assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
- {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
-
- jsTest.log("Reconnect the old primary to the rest of the nodes");
- // Only allow the old primary to connect to the other nodes, not the other way around.
- // This is so that the old priamry will detect that it needs to step down and step itself down,
- // rather than one of the other nodes detecting this and sending it a replSetStepDown command,
- // which would cause the old primary to kill all operations and close all connections, making
- // the way that the insert in the parallel shell fails be nondeterministic. Rather than
- // handling all possible failure modes in the parallel shell, allowing heartbeat connectivity in
- // only one direction makes it easier for the test to fail deterministically.
- nodes[1].acceptConnectionsFrom(nodes[0]);
- nodes[2].acceptConnectionsFrom(nodes[0]);
-
- // Allow the old primary to finish stepping down so that shutdown can finish.
- assert.commandWorked(
- nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'off'}));
-
- joinMajorityWriter();
-
- // Node 0 will go into rollback after it steps down. We want to wait for that to happen, and
- // then complete, in order to get a clean shutdown.
- jsTestLog("Waiting for node 0 to roll back the failed write.");
- rst.awaitReplication();
-
- rst.stopSet();
+}
+
+// SERVER-20844 ReplSetTest starts up a single node replica set then reconfigures to the correct
+// size for faster startup, so nodes[0] is always the first primary.
+jsTestLog("Make sure node 0 is primary.");
+var primary = rst.getPrimary();
+var secondaries = rst.getSecondaries();
+assert.eq(nodes[0], primary);
+// Wait for all data bearing nodes to get up to date.
+assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
+ {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
+
+// Stop the secondaries from replicating.
+stopServerReplication(secondaries);
+// Stop the primary from being able to complete stepping down.
+assert.commandWorked(
+ nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'}));
+
+jsTestLog("Do w:majority write that will block waiting for replication.");
+var doMajorityWrite = function() {
+ // Run ismaster command with 'hangUpOnStepDown' set to false to mark this connection as
+ // one that shouldn't be closed when the node steps down. This makes it easier to detect
+ // the error returned by the write concern failure.
+ assert.commandWorked(db.adminCommand({ismaster: 1, hangUpOnStepDown: false}));
+
+ var res = db.getSiblingDB('wMajorityCheck').stepdownAndBackUp.insert({a: 2}, {
+ writeConcern: {w: 'majority', wtimeout: 600000}
+ });
+ assert.writeErrorWithCode(
+ res, [ErrorCodes.PrimarySteppedDown, ErrorCodes.InterruptedDueToReplStateChange]);
+};
+
+var joinMajorityWriter = startParallelShell(doMajorityWrite, nodes[0].port);
+
+jsTest.log("Disconnect primary from all secondaries");
+nodes[0].disconnect(nodes[1]);
+nodes[0].disconnect(nodes[2]);
+
+jsTest.log("Wait for a new primary to be elected");
+// Allow the secondaries to replicate again.
+restartServerReplication(secondaries);
+
+waitForPrimary(nodes[1]);
+
+jsTest.log("Do a write to the new primary");
+assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
+ {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
+
+jsTest.log("Reconnect the old primary to the rest of the nodes");
+// Only allow the old primary to connect to the other nodes, not the other way around.
+// This is so that the old primary will detect that it needs to step down and step itself down,
+// rather than one of the other nodes detecting this and sending it a replSetStepDown command,
+// which would cause the old primary to kill all operations and close all connections, making
+// the failure mode of the insert in the parallel shell nondeterministic. Rather than
+// handling all possible failure modes in the parallel shell, allowing heartbeat connectivity in
+// only one direction makes it easier for the test to fail deterministically.
+nodes[1].acceptConnectionsFrom(nodes[0]);
+nodes[2].acceptConnectionsFrom(nodes[0]);
+
+// Allow the old primary to finish stepping down so that shutdown can finish.
+assert.commandWorked(
+ nodes[0].adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'off'}));
+
+joinMajorityWriter();
+
+// Node 0 will go into rollback after it steps down. We want to wait for that to happen, and
+// then complete, in order to get a clean shutdown.
+jsTestLog("Waiting for node 0 to roll back the failed write.");
+rst.awaitReplication();
+
+rst.stopSet();
}());
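
The test's choreography hinges on the enable/disable failpoint pattern shown in the hunk. As a
minimal reusable sketch (assuming `node` is a connection to the member being controlled, and
using the same failpoint name the test does):

    assert.commandWorked(
        node.adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'alwaysOn'}));
    // ... provoke and observe the step-down race here ...
    assert.commandWorked(
        node.adminCommand({configureFailPoint: 'blockHeartbeatStepdown', mode: 'off'}));
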
diff --git a/jstests/replsets/write_concern_after_stepdown_and_stepup.js b/jstests/replsets/write_concern_after_stepdown_and_stepup.js
index 19143230375..daa143aa901 100644
--- a/jstests/replsets/write_concern_after_stepdown_and_stepup.js
+++ b/jstests/replsets/write_concern_after_stepdown_and_stepup.js
@@ -4,118 +4,118 @@
* stale primary is re-elected primary before waiting for the write concern acknowledgement.
*/
(function() {
- 'use strict';
-
- load("jstests/replsets/rslib.js");
- load("jstests/libs/write_concern_util.js");
-
- var name = "writeConcernStepDownAndBackUp";
- var dbName = "wMajorityCheck";
- var collName = "stepdownAndBackUp";
-
- var rst = new ReplSetTest({
- name: name,
- nodes: [
- {},
- {},
- {rsConfig: {priority: 0}},
- ],
- useBridge: true
+'use strict';
+
+load("jstests/replsets/rslib.js");
+load("jstests/libs/write_concern_util.js");
+
+var name = "writeConcernStepDownAndBackUp";
+var dbName = "wMajorityCheck";
+var collName = "stepdownAndBackUp";
+
+var rst = new ReplSetTest({
+ name: name,
+ nodes: [
+ {},
+ {},
+ {rsConfig: {priority: 0}},
+ ],
+ useBridge: true
+});
+var nodes = rst.startSet();
+rst.initiate();
+
+function waitForPrimary(node) {
+ assert.soon(function() {
+ return node.adminCommand('ismaster').ismaster;
});
- var nodes = rst.startSet();
- rst.initiate();
+}
- function waitForPrimary(node) {
- assert.soon(function() {
- return node.adminCommand('ismaster').ismaster;
- });
- }
-
- function stepUp(node) {
- var primary = rst.getPrimary();
- if (primary != node) {
- assert.commandWorked(primary.adminCommand({replSetStepDown: 60 * 5}));
- }
- waitForPrimary(node);
- }
-
- jsTestLog("Make sure node 0 is primary.");
- stepUp(nodes[0]);
+function stepUp(node) {
var primary = rst.getPrimary();
- var secondaries = rst.getSecondaries();
- assert.eq(nodes[0], primary);
- // Wait for all data bearing nodes to get up to date.
- assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
- {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
-
- // Stop the secondaries from replicating.
- stopServerReplication(secondaries);
- // Stop the primary from calling into awaitReplication()
- assert.commandWorked(nodes[0].adminCommand(
- {configureFailPoint: 'hangBeforeWaitingForWriteConcern', mode: 'alwaysOn'}));
-
- jsTestLog("Do w:majority write that won't enter awaitReplication() until after the primary " +
- "has stepped down and back up");
- var doMajorityWrite = function() {
- // Run ismaster command with 'hangUpOnStepDown' set to false to mark this connection as
- // one that shouldn't be closed when the node steps down. This simulates the scenario where
- // the write was coming from a mongos.
- assert.commandWorked(db.adminCommand({ismaster: 1, hangUpOnStepDown: false}));
-
- var res = db.getSiblingDB('wMajorityCheck').stepdownAndBackUp.insert({a: 2}, {
- writeConcern: {w: 'majority'}
- });
- assert.writeErrorWithCode(res, ErrorCodes.InterruptedDueToReplStateChange);
- };
-
- var joinMajorityWriter = startParallelShell(doMajorityWrite, nodes[0].port);
-
- jsTest.log("Disconnect primary from all secondaries");
- nodes[0].disconnect(nodes[1]);
- nodes[0].disconnect(nodes[2]);
-
- jsTest.log("Wait for a new primary to be elected");
- // Allow the secondaries to replicate again.
- restartServerReplication(secondaries);
-
- waitForPrimary(nodes[1]);
-
- jsTest.log("Do a write to the new primary");
- assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
- {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
-
- jsTest.log("Reconnect the old primary to the rest of the nodes");
- nodes[0].reconnect(nodes[1]);
- nodes[0].reconnect(nodes[2]);
-
- jsTest.log("Wait for the old primary to step down, roll back its write, and apply the " +
- "new writes from the new primary");
- waitForState(nodes[0], ReplSetTest.State.SECONDARY);
- rst.awaitReplication();
-
- // At this point all 3 nodes should have the same data
- assert.soonNoExcept(function() {
- nodes.forEach(function(node) {
- assert.eq(null,
- node.getDB(dbName).getCollection(collName).findOne({a: 2}),
- "Node " + node.host + " contained op that should have been rolled back");
- assert.neq(null,
- node.getDB(dbName).getCollection(collName).findOne({a: 3}),
- "Node " + node.host +
- " was missing op from branch of history that should have persisted");
- });
- return true;
+ if (primary != node) {
+ assert.commandWorked(primary.adminCommand({replSetStepDown: 60 * 5}));
+ }
+ waitForPrimary(node);
+}
+
+jsTestLog("Make sure node 0 is primary.");
+stepUp(nodes[0]);
+var primary = rst.getPrimary();
+var secondaries = rst.getSecondaries();
+assert.eq(nodes[0], primary);
+// Wait for all data bearing nodes to get up to date.
+assert.writeOK(nodes[0].getDB(dbName).getCollection(collName).insert(
+ {a: 1}, {writeConcern: {w: 3, wtimeout: rst.kDefaultTimeoutMS}}));
+
+// Stop the secondaries from replicating.
+stopServerReplication(secondaries);
+// Stop the primary from calling into awaitReplication()
+assert.commandWorked(nodes[0].adminCommand(
+ {configureFailPoint: 'hangBeforeWaitingForWriteConcern', mode: 'alwaysOn'}));
+
+jsTestLog("Do w:majority write that won't enter awaitReplication() until after the primary " +
+ "has stepped down and back up");
+var doMajorityWrite = function() {
+ // Run ismaster command with 'hangUpOnStepDown' set to false to mark this connection as
+ // one that shouldn't be closed when the node steps down. This simulates the scenario where
+ // the write was coming from a mongos.
+ assert.commandWorked(db.adminCommand({ismaster: 1, hangUpOnStepDown: false}));
+
+ var res = db.getSiblingDB('wMajorityCheck').stepdownAndBackUp.insert({a: 2}, {
+ writeConcern: {w: 'majority'}
+ });
+ assert.writeErrorWithCode(res, ErrorCodes.InterruptedDueToReplStateChange);
+};
+
+var joinMajorityWriter = startParallelShell(doMajorityWrite, nodes[0].port);
+
+jsTest.log("Disconnect primary from all secondaries");
+nodes[0].disconnect(nodes[1]);
+nodes[0].disconnect(nodes[2]);
+
+jsTest.log("Wait for a new primary to be elected");
+// Allow the secondaries to replicate again.
+restartServerReplication(secondaries);
+
+waitForPrimary(nodes[1]);
+
+jsTest.log("Do a write to the new primary");
+assert.writeOK(nodes[1].getDB(dbName).getCollection(collName).insert(
+ {a: 3}, {writeConcern: {w: 2, wtimeout: rst.kDefaultTimeoutMS}}));
+
+jsTest.log("Reconnect the old primary to the rest of the nodes");
+nodes[0].reconnect(nodes[1]);
+nodes[0].reconnect(nodes[2]);
+
+jsTest.log("Wait for the old primary to step down, roll back its write, and apply the " +
+ "new writes from the new primary");
+waitForState(nodes[0], ReplSetTest.State.SECONDARY);
+rst.awaitReplication();
+
+// At this point all 3 nodes should have the same data
+assert.soonNoExcept(function() {
+ nodes.forEach(function(node) {
+ assert.eq(null,
+ node.getDB(dbName).getCollection(collName).findOne({a: 2}),
+ "Node " + node.host + " contained op that should have been rolled back");
+ assert.neq(null,
+ node.getDB(dbName).getCollection(collName).findOne({a: 3}),
+ "Node " + node.host +
+ " was missing op from branch of history that should have persisted");
});
+ return true;
+});
- jsTest.log("Make the original primary become primary once again");
- stepUp(nodes[0]);
+jsTest.log("Make the original primary become primary once again");
+stepUp(nodes[0]);
- jsTest.log("Unblock the thread waiting for replication of the now rolled-back write, ensure " +
- "that the write concern failed");
- assert.commandWorked(nodes[0].adminCommand(
- {configureFailPoint: 'hangBeforeWaitingForWriteConcern', mode: 'off'}));
+jsTest.log("Unblock the thread waiting for replication of the now rolled-back write, ensure " +
+ "that the write concern failed");
+assert.commandWorked(
+ nodes[0].adminCommand({configureFailPoint: 'hangBeforeWaitingForWriteConcern', mode: 'off'}));
- joinMajorityWriter();
+joinMajorityWriter();
- rst.stopSet();
+rst.stopSet();
}());
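
Both stepdown tests lean on the same parallel-shell idiom: startParallelShell serializes the
given function, runs it in a new shell against the given port, and returns a join handle that
blocks until that shell exits. A minimal sketch of the idiom on its own (the ping command is a
placeholder for the racing workload):

    const join = startParallelShell(function() {
        assert.commandWorked(db.adminCommand({ping: 1}));
    }, rst.nodes[0].port);
    // ... drive the race from the main thread ...
    join();  // wait for the parallel shell to finish
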